narrow: detect if narrowspec was changed in a different share...
Martin von Zweigbergk
r41072:ce0bc295 default
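This change detects when the narrowspec was modified through a different share of the same store: `hg share` now seeds the new working copy with the store's narrowspec (see the `postshare()` hunk in `mercurial/hg.py` below), and `hg tracked` gains an `--update-working-copy` flag that re-syncs a working copy after a sibling share has widened or narrowed the repository. As a rough illustration of the detection itself (not part of this diff; the file names and `tryread` usage are assumptions), it amounts to comparing the store's narrowspec with the per-working-copy cached copy:

    def narrowspec_changed(repo):
        # Hypothetical sketch: the shared store holds the authoritative
        # narrowspec, while each working copy keeps its own cached copy
        # (written by narrowspec.copytoworkingcopy in the hunks below).
        # If the two differ, another share must have changed the spec.
        stored = repo.svfs.tryread('narrowspec')          # in .hg/store, shared
        cached = repo.vfs.tryread('narrowspec.dirstate')  # per working copy
        return stored != cached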
@@ -1,24 +1,21 b''
1 Integration with the share extension needs improvement. Right now
2 we've seen some odd bugs.
3
4 1 Address commentary in manifest.excludedmanifestrevlog.add -
5 2 specifically we should improve the collaboration with core so that
6 3 add() never gets called on an excluded directory and we can improve
7 4 the stand-in to raise a ProgrammingError.
8 5
9 6 Reason more completely about rename-filtering logic in
10 7 narrowfilelog. There could be some surprises lurking there.
11 8
12 9 Formally document the narrowspec format. For bonus points, unify with the
13 10 server-specified narrowspec format.
14 11
15 12 narrowrepo.setnarrowpats() or narrowspec.save() need to make sure
16 13 they're holding the wlock.
17 14
18 15 The following places do an unrestricted dirstate walk (including files outside the
19 16 narrowspec). Some of them should perhaps not do that.
20 17
21 18 * debugfileset
22 19 * perfwalk
23 20 * sparse (but restricted to sparse config)
24 21 * largefiles
@@ -1,472 +1,481 b''
1 1 # narrowcommands.py - command modifications for narrowhg extension
2 2 #
3 3 # Copyright 2017 Google, Inc.
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 from __future__ import absolute_import
8 8
9 9 import itertools
10 10 import os
11 11
12 12 from mercurial.i18n import _
13 13 from mercurial import (
14 14 bundle2,
15 15 cmdutil,
16 16 commands,
17 17 discovery,
18 18 encoding,
19 19 error,
20 20 exchange,
21 21 extensions,
22 22 hg,
23 23 merge,
24 24 narrowspec,
25 25 node,
26 26 pycompat,
27 27 registrar,
28 28 repair,
29 29 repository,
30 30 repoview,
31 31 sparse,
32 32 util,
33 33 wireprototypes,
34 34 )
35 35
36 36 table = {}
37 37 command = registrar.command(table)
38 38
39 39 def setup():
40 40 """Wraps user-facing mercurial commands with narrow-aware versions."""
41 41
42 42 entry = extensions.wrapcommand(commands.table, 'clone', clonenarrowcmd)
43 43 entry[1].append(('', 'narrow', None,
44 44 _("create a narrow clone of select files")))
45 45 entry[1].append(('', 'depth', '',
46 46 _("limit the history fetched by distance from heads")))
47 47 entry[1].append(('', 'narrowspec', '',
48 48 _("read narrowspecs from file")))
49 49 # TODO(durin42): unify sparse/narrow --include/--exclude logic a bit
50 50 if 'sparse' not in extensions.enabled():
51 51 entry[1].append(('', 'include', [],
52 52 _("specifically fetch this file/directory")))
53 53 entry[1].append(
54 54 ('', 'exclude', [],
55 55 _("do not fetch this file/directory, even if included")))
56 56
57 57 entry = extensions.wrapcommand(commands.table, 'pull', pullnarrowcmd)
58 58 entry[1].append(('', 'depth', '',
59 59 _("limit the history fetched by distance from heads")))
60 60
61 61 extensions.wrapcommand(commands.table, 'archive', archivenarrowcmd)
62 62
63 63 def clonenarrowcmd(orig, ui, repo, *args, **opts):
64 64 """Wraps clone command, so 'hg clone' first wraps localrepo.clone()."""
65 65 opts = pycompat.byteskwargs(opts)
66 66 wrappedextraprepare = util.nullcontextmanager()
67 67 narrowspecfile = opts['narrowspec']
68 68
69 69 if narrowspecfile:
70 70 filepath = os.path.join(encoding.getcwd(), narrowspecfile)
71 71 ui.status(_("reading narrowspec from '%s'\n") % filepath)
72 72 try:
73 73 fdata = util.readfile(filepath)
74 74 except IOError as inst:
75 75 raise error.Abort(_("cannot read narrowspecs from '%s': %s") %
76 76 (filepath, encoding.strtolocal(inst.strerror)))
77 77
78 78 includes, excludes, profiles = sparse.parseconfig(ui, fdata, 'narrow')
79 79 if profiles:
80 80 raise error.Abort(_("cannot specify other files using '%include' in"
81 81 " narrowspec"))
82 82
83 83 narrowspec.validatepatterns(includes)
84 84 narrowspec.validatepatterns(excludes)
85 85
86 86 # narrowspec is passed, so we should assume that the user wants a narrow clone
87 87 opts['narrow'] = True
88 88 opts['include'].extend(includes)
89 89 opts['exclude'].extend(excludes)
90 90
91 91 if opts['narrow']:
92 92 def pullbundle2extraprepare_widen(orig, pullop, kwargs):
93 93 orig(pullop, kwargs)
94 94
95 95 if opts.get('depth'):
96 96 kwargs['depth'] = opts['depth']
97 97 wrappedextraprepare = extensions.wrappedfunction(exchange,
98 98 '_pullbundle2extraprepare', pullbundle2extraprepare_widen)
99 99
100 100 with wrappedextraprepare:
101 101 return orig(ui, repo, *args, **pycompat.strkwargs(opts))
102 102
103 103 def pullnarrowcmd(orig, ui, repo, *args, **opts):
104 104 """Wraps pull command to allow modifying narrow spec."""
105 105 wrappedextraprepare = util.nullcontextmanager()
106 106 if repository.NARROW_REQUIREMENT in repo.requirements:
107 107
108 108 def pullbundle2extraprepare_widen(orig, pullop, kwargs):
109 109 orig(pullop, kwargs)
110 110 if opts.get(r'depth'):
111 111 kwargs['depth'] = opts[r'depth']
112 112 wrappedextraprepare = extensions.wrappedfunction(exchange,
113 113 '_pullbundle2extraprepare', pullbundle2extraprepare_widen)
114 114
115 115 with wrappedextraprepare:
116 116 return orig(ui, repo, *args, **opts)
117 117
118 118 def archivenarrowcmd(orig, ui, repo, *args, **opts):
119 119 """Wraps archive command to narrow the default includes."""
120 120 if repository.NARROW_REQUIREMENT in repo.requirements:
121 121 repo_includes, repo_excludes = repo.narrowpats
122 122 includes = set(opts.get(r'include', []))
123 123 excludes = set(opts.get(r'exclude', []))
124 124 includes, excludes, unused_invalid = narrowspec.restrictpatterns(
125 125 includes, excludes, repo_includes, repo_excludes)
126 126 if includes:
127 127 opts[r'include'] = includes
128 128 if excludes:
129 129 opts[r'exclude'] = excludes
130 130 return orig(ui, repo, *args, **opts)
131 131
132 132 def pullbundle2extraprepare(orig, pullop, kwargs):
133 133 repo = pullop.repo
134 134 if repository.NARROW_REQUIREMENT not in repo.requirements:
135 135 return orig(pullop, kwargs)
136 136
137 137 if wireprototypes.NARROWCAP not in pullop.remote.capabilities():
138 138 raise error.Abort(_("server does not support narrow clones"))
139 139 orig(pullop, kwargs)
140 140 kwargs['narrow'] = True
141 141 include, exclude = repo.narrowpats
142 142 kwargs['oldincludepats'] = include
143 143 kwargs['oldexcludepats'] = exclude
144 144 if include:
145 145 kwargs['includepats'] = include
146 146 if exclude:
147 147 kwargs['excludepats'] = exclude
148 148 # calculate known nodes only in ellipses cases because in non-ellipses cases
149 149 # we have all the nodes
150 150 if wireprototypes.ELLIPSESCAP in pullop.remote.capabilities():
151 151 kwargs['known'] = [node.hex(ctx.node()) for ctx in
152 152 repo.set('::%ln', pullop.common)
153 153 if ctx.node() != node.nullid]
154 154 if not kwargs['known']:
155 155 # Mercurial serializes an empty list as '' and deserializes it as
156 156 # [''], so delete it instead to avoid handling the empty string on
157 157 # the server.
158 158 del kwargs['known']
159 159
160 160 extensions.wrapfunction(exchange, '_pullbundle2extraprepare',
161 161 pullbundle2extraprepare)
162 162
163 163 # This is an extension point for filesystems that need to do something other
164 164 # than just blindly unlink the files. It's not clear what arguments would be
165 165 # useful, so we're passing in a fair number of them, some of them redundant.
166 166 def _narrowcleanupwdir(repo, oldincludes, oldexcludes, newincludes, newexcludes,
167 167 oldmatch, newmatch):
168 168 for f in repo.dirstate:
169 169 if not newmatch(f):
170 170 repo.dirstate.drop(f)
171 171 repo.wvfs.unlinkpath(f)
172 172
173 173 def _narrow(ui, repo, remote, commoninc, oldincludes, oldexcludes,
174 174 newincludes, newexcludes, force):
175 175 oldmatch = narrowspec.match(repo.root, oldincludes, oldexcludes)
176 176 newmatch = narrowspec.match(repo.root, newincludes, newexcludes)
177 177
178 178 # This is essentially doing "hg outgoing" to find all local-only
179 179 # commits. We will then check that the local-only commits don't
180 180 # have any changes to files that will be untracked.
181 181 unfi = repo.unfiltered()
182 182 outgoing = discovery.findcommonoutgoing(unfi, remote,
183 183 commoninc=commoninc)
184 184 ui.status(_('looking for local changes to affected paths\n'))
185 185 localnodes = []
186 186 for n in itertools.chain(outgoing.missing, outgoing.excluded):
187 187 if any(oldmatch(f) and not newmatch(f) for f in unfi[n].files()):
188 188 localnodes.append(n)
189 189 revstostrip = unfi.revs('descendants(%ln)', localnodes)
190 190 hiddenrevs = repoview.filterrevs(repo, 'visible')
191 191 visibletostrip = list(repo.changelog.node(r)
192 192 for r in (revstostrip - hiddenrevs))
193 193 if visibletostrip:
194 194 ui.status(_('The following changeset(s) or their ancestors have '
195 195 'local changes not on the remote:\n'))
196 196 maxnodes = 10
197 197 if ui.verbose or len(visibletostrip) <= maxnodes:
198 198 for n in visibletostrip:
199 199 ui.status('%s\n' % node.short(n))
200 200 else:
201 201 for n in visibletostrip[:maxnodes]:
202 202 ui.status('%s\n' % node.short(n))
203 203 ui.status(_('...and %d more, use --verbose to list all\n') %
204 204 (len(visibletostrip) - maxnodes))
205 205 if not force:
206 206 raise error.Abort(_('local changes found'),
207 207 hint=_('use --force-delete-local-changes to '
208 208 'ignore'))
209 209
210 210 with ui.uninterruptable():
211 211 if revstostrip:
212 212 tostrip = [unfi.changelog.node(r) for r in revstostrip]
213 213 if repo['.'].node() in tostrip:
214 214 # stripping working copy, so move to a different commit first
215 215 urev = max(repo.revs('(::%n) - %ln + null',
216 216 repo['.'].node(), visibletostrip))
217 217 hg.clean(repo, urev)
218 218 overrides = {('devel', 'strip-obsmarkers'): False}
219 219 with ui.configoverride(overrides, 'narrow'):
220 220 repair.strip(ui, unfi, tostrip, topic='narrow')
221 221
222 222 todelete = []
223 223 for f, f2, size in repo.store.datafiles():
224 224 if f.startswith('data/'):
225 225 file = f[5:-2]
226 226 if not newmatch(file):
227 227 todelete.append(f)
228 228 elif f.startswith('meta/'):
229 229 dir = f[5:-13]
230 230 dirs = ['.'] + sorted(util.dirs({dir})) + [dir]
231 231 include = True
232 232 for d in dirs:
233 233 visit = newmatch.visitdir(d)
234 234 if not visit:
235 235 include = False
236 236 break
237 237 if visit == 'all':
238 238 break
239 239 if not include:
240 240 todelete.append(f)
241 241
242 242 repo.destroying()
243 243
244 244 with repo.transaction("narrowing"):
245 245 # Update narrowspec before removing revlogs, so repo won't be
246 246 # corrupt in case of crash
247 247 repo.setnarrowpats(newincludes, newexcludes)
248 248
249 249 for f in todelete:
250 250 ui.status(_('deleting %s\n') % f)
251 251 util.unlinkpath(repo.svfs.join(f))
252 252 repo.store.markremoved(f)
253 253
254 254 _narrowcleanupwdir(repo, oldincludes, oldexcludes, newincludes,
255 255 newexcludes, oldmatch, newmatch)
256 256
257 257 repo.destroyed()
258 258
259 259 def _widen(ui, repo, remote, commoninc, oldincludes, oldexcludes,
260 260 newincludes, newexcludes):
261 261 newmatch = narrowspec.match(repo.root, newincludes, newexcludes)
262 262
263 263 # For now we assume that if a server has ellipses enabled, we will be
264 264 # exchanging ellipses nodes. In the future we should add ellipses as a
265 265 # client-side requirement (maybe) to distinguish whether a client is
266 266 # shallow, and then tell the server whether or not we want ellipses.
267 267 # Theoretically, a non-ellipses repo should be able to use narrow
268 268 # functionality from an ellipses-enabled server.
269 269 ellipsesremote = wireprototypes.ELLIPSESCAP in remote.capabilities()
270 270
271 271 def pullbundle2extraprepare_widen(orig, pullop, kwargs):
272 272 orig(pullop, kwargs)
273 273 # The old{in,ex}cludepats have already been set by orig()
274 274 kwargs['includepats'] = newincludes
275 275 kwargs['excludepats'] = newexcludes
276 276 wrappedextraprepare = extensions.wrappedfunction(exchange,
277 277 '_pullbundle2extraprepare', pullbundle2extraprepare_widen)
278 278
279 279 # define a function that narrowbundle2 can call after creating the
280 280 # backup bundle, but before applying the bundle from the server
281 281 def setnewnarrowpats():
282 282 repo.setnarrowpats(newincludes, newexcludes)
283 283 repo.setnewnarrowpats = setnewnarrowpats
284 284 # silence the devel-warning of applying an empty changegroup
285 285 overrides = {('devel', 'all-warnings'): False}
286 286
287 287 with ui.uninterruptable():
288 288 common = commoninc[0]
289 289 if ellipsesremote:
290 290 ds = repo.dirstate
291 291 p1, p2 = ds.p1(), ds.p2()
292 292 with ds.parentchange():
293 293 ds.setparents(node.nullid, node.nullid)
294 294 with wrappedextraprepare,\
295 295 repo.ui.configoverride(overrides, 'widen'):
296 296 exchange.pull(repo, remote, heads=common)
297 297 with ds.parentchange():
298 298 ds.setparents(p1, p2)
299 299 else:
300 300 with remote.commandexecutor() as e:
301 301 bundle = e.callcommand('narrow_widen', {
302 302 'oldincludes': oldincludes,
303 303 'oldexcludes': oldexcludes,
304 304 'newincludes': newincludes,
305 305 'newexcludes': newexcludes,
306 306 'cgversion': '03',
307 307 'commonheads': common,
308 308 'known': [],
309 309 'ellipses': False,
310 310 }).result()
311 311
312 312 with repo.transaction('widening') as tr,\
313 313 repo.ui.configoverride(overrides, 'widen'):
314 314 tgetter = lambda: tr
315 315 bundle2.processbundle(repo, bundle,
316 316 transactiongetter=tgetter)
317 317
318 318 repo.setnewnarrowpats()
319 319 actions = merge.emptyactions()
320 320 addgaction = actions['g'].append
321 321
322 322 mf = repo['.'].manifest().matches(newmatch)
323 323 for f, fn in mf.iteritems():
324 324 if f not in repo.dirstate:
325 325 addgaction((f, (mf.flags(f), False),
326 326 "add from widened narrow clone"))
327 327
328 328 merge.applyupdates(repo, actions, wctx=repo[None],
329 329 mctx=repo['.'], overwrite=False)
330 330 merge.recordupdates(repo, actions, branchmerge=False)
331 331
332 332 # TODO(rdamazio): Make new matcher format and update description
333 333 @command('tracked',
334 334 [('', 'addinclude', [], _('new paths to include')),
335 335 ('', 'removeinclude', [], _('old paths to no longer include')),
336 336 ('', 'addexclude', [], _('new paths to exclude')),
337 337 ('', 'import-rules', '', _('import narrowspecs from a file')),
338 338 ('', 'removeexclude', [], _('old paths to no longer exclude')),
339 339 ('', 'clear', False, _('whether to replace the existing narrowspec')),
340 340 ('', 'force-delete-local-changes', False,
341 341 _('forces deletion of local changes when narrowing')),
342 ('', 'update-working-copy', False,
343 _('update working copy when the store has changed')),
342 344 ] + commands.remoteopts,
343 345 _('[OPTIONS]... [REMOTE]'),
344 346 inferrepo=True)
345 347 def trackedcmd(ui, repo, remotepath=None, *pats, **opts):
346 348 """show or change the current narrowspec
347 349
348 350 With no argument, shows the current narrowspec entries, one per line. Each
349 351 line will be prefixed with 'I' or 'X' for included or excluded patterns,
350 352 respectively.
351 353
352 354 The narrowspec consists of expressions to match remote files and/or
353 355 directories that should be pulled into your client.
354 356 The narrowspec has *include* and *exclude* expressions, with excludes always
355 357 trumping includes: that is, if a file matches an exclude expression, it will
356 358 be excluded even if it also matches an include expression.
357 359 Excluding files that were never included has no effect.
358 360
359 361 Each included or excluded entry is in the format described by
360 362 'hg help patterns'.
361 363
362 364 The options allow you to add or remove included and excluded expressions.
363 365
364 366 If --clear is specified, then all previous includes and excludes are DROPPED
365 367 and replaced by the new ones specified to --addinclude and --addexclude.
366 368 If --clear is specified without any further options, the narrowspec will be
367 369 empty and will not match any files.
368 370 """
369 371 opts = pycompat.byteskwargs(opts)
370 372 if repository.NARROW_REQUIREMENT not in repo.requirements:
371 373 raise error.Abort(_('the narrow command is only supported on '
372 374 'repositories cloned with --narrow'))
373 375
374 376 # Before supporting, decide whether "hg tracked --clear" should mean
375 377 # tracking no paths or all paths.
376 378 if opts['clear']:
377 379 raise error.Abort(_('the --clear option is not yet supported'))
378 380
379 381 # import rules from a file
380 382 newrules = opts.get('import_rules')
381 383 if newrules:
382 384 try:
383 385 filepath = os.path.join(encoding.getcwd(), newrules)
384 386 fdata = util.readfile(filepath)
385 387 except IOError as inst:
386 388 raise error.Abort(_("cannot read narrowspecs from '%s': %s") %
387 389 (filepath, encoding.strtolocal(inst.strerror)))
388 390 includepats, excludepats, profiles = sparse.parseconfig(ui, fdata,
389 391 'narrow')
390 392 if profiles:
391 393 raise error.Abort(_("including other spec files using '%include' "
392 394 "is not supported in narrowspec"))
393 395 opts['addinclude'].extend(includepats)
394 396 opts['addexclude'].extend(excludepats)
395 397
396 398 addedincludes = narrowspec.parsepatterns(opts['addinclude'])
397 399 removedincludes = narrowspec.parsepatterns(opts['removeinclude'])
398 400 addedexcludes = narrowspec.parsepatterns(opts['addexclude'])
399 401 removedexcludes = narrowspec.parsepatterns(opts['removeexclude'])
400 402
403 update_working_copy = opts['update_working_copy']
401 404 only_show = not (addedincludes or removedincludes or addedexcludes or
402 removedexcludes or newrules)
405 removedexcludes or newrules or update_working_copy)
403 406
404 407 oldincludes, oldexcludes = repo.narrowpats
405 408
406 409 # filter the user passed additions and deletions into actual additions and
407 410 # deletions of excludes and includes
408 411 addedincludes -= oldincludes
409 412 removedincludes &= oldincludes
410 413 addedexcludes -= oldexcludes
411 414 removedexcludes &= oldexcludes
412 415
413 416 widening = addedincludes or removedexcludes
414 417 narrowing = removedincludes or addedexcludes
415 418
416 419 # Only print the current narrowspec.
417 420 if only_show:
418 421 ui.pager('tracked')
419 422 fm = ui.formatter('narrow', opts)
420 423 for i in sorted(oldincludes):
421 424 fm.startitem()
422 425 fm.write('status', '%s ', 'I', label='narrow.included')
423 426 fm.write('pat', '%s\n', i, label='narrow.included')
424 427 for i in sorted(oldexcludes):
425 428 fm.startitem()
426 429 fm.write('status', '%s ', 'X', label='narrow.excluded')
427 430 fm.write('pat', '%s\n', i, label='narrow.excluded')
428 431 fm.end()
429 432 return 0
430 433
434 if update_working_copy:
435 with repo.wlock(), repo.lock(), repo.transaction('narrow-wc') as tr:
436 narrowspec.updateworkingcopy(repo, tr)
437 narrowspec.copytoworkingcopy(repo, tr)
438 return 0
439
431 440 if not widening and not narrowing:
432 441 ui.status(_("nothing to widen or narrow\n"))
433 442 return 0
434 443
435 444 with repo.wlock(), repo.lock():
436 445 cmdutil.bailifchanged(repo)
437 446
438 447 # Find the revisions we have in common with the remote. These will
439 448 # be used for finding local-only changes for narrowing. They will
440 449 # also define the set of revisions to update for widening.
441 450 remotepath = ui.expandpath(remotepath or 'default')
442 451 url, branches = hg.parseurl(remotepath)
443 452 ui.status(_('comparing with %s\n') % util.hidepassword(url))
444 453 remote = hg.peer(repo, opts, url)
445 454
446 455 # check narrow support before doing anything if widening needs to be
447 456 # performed. In future we should also abort if client is ellipses and
448 457 # server does not support ellipses
449 458 if widening and wireprototypes.NARROWCAP not in remote.capabilities():
450 459 raise error.Abort(_("server does not support narrow clones"))
451 460
452 461 commoninc = discovery.findcommonincoming(repo, remote)
453 462
454 463 if narrowing:
455 464 newincludes = oldincludes - removedincludes
456 465 newexcludes = oldexcludes | addedexcludes
457 466 _narrow(ui, repo, remote, commoninc, oldincludes, oldexcludes,
458 467 newincludes, newexcludes,
459 468 opts['force_delete_local_changes'])
460 469 # _narrow() updated the narrowspec and _widen() below needs to
461 470 # use the updated values as its base (otherwise removed includes
462 471 # and addedexcludes will be lost in the resulting narrowspec)
463 472 oldincludes = newincludes
464 473 oldexcludes = newexcludes
465 474
466 475 if widening:
467 476 newincludes = oldincludes | addedincludes
468 477 newexcludes = oldexcludes - removedexcludes
469 478 _widen(ui, repo, remote, commoninc, oldincludes, oldexcludes,
470 479 newincludes, newexcludes)
471 480
472 481 return 0
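The filtering in trackedcmd above turns the user-passed options into actual changes by intersecting and subtracting against the current narrowspec, and the widen/narrow decision follows from which sets remain non-empty (removing an exclude widens; removing an include narrows). A worked example with invented patterns:

    # Worked example of trackedcmd's set arithmetic (patterns invented):
    oldincludes = {'path:src', 'path:docs'}
    oldexcludes = {'path:src/vendor'}

    addedincludes = {'path:tests', 'path:docs'} - oldincludes  # {'path:tests'}
    removedincludes = {'path:docs', 'path:web'} & oldincludes  # {'path:docs'}
    addedexcludes = {'path:src/big'} - oldexcludes             # {'path:src/big'}
    removedexcludes = {'path:src/vendor'} & oldexcludes        # {'path:src/vendor'}

    widening = bool(addedincludes or removedexcludes)   # True
    narrowing = bool(removedincludes or addedexcludes)  # True

With the new `--update-working-copy` flag, a share whose store narrowspec changed elsewhere instead takes the short wlock/lock/transaction path added above (`narrowspec.updateworkingcopy` followed by `copytoworkingcopy`) without contacting any remote.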
@@ -1,1225 +1,1229 b''
1 1 # hg.py - repository classes for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from __future__ import absolute_import
10 10
11 11 import errno
12 12 import hashlib
13 13 import os
14 14 import shutil
15 15 import stat
16 16
17 17 from .i18n import _
18 18 from .node import (
19 19 nullid,
20 20 )
21 21
22 22 from . import (
23 23 bookmarks,
24 24 bundlerepo,
25 25 cacheutil,
26 26 cmdutil,
27 27 destutil,
28 28 discovery,
29 29 error,
30 30 exchange,
31 31 extensions,
32 32 httppeer,
33 33 localrepo,
34 34 lock,
35 35 logcmdutil,
36 36 logexchange,
37 37 merge as mergemod,
38 38 narrowspec,
39 39 node,
40 40 phases,
41 repository as repositorymod,
41 42 scmutil,
42 43 sshpeer,
43 44 statichttprepo,
44 45 ui as uimod,
45 46 unionrepo,
46 47 url,
47 48 util,
48 49 verify as verifymod,
49 50 vfs as vfsmod,
50 51 )
51 52
52 53 release = lock.release
53 54
54 55 # shared features
55 56 sharedbookmarks = 'bookmarks'
56 57
57 58 def _local(path):
58 59 path = util.expandpath(util.urllocalpath(path))
59 60 return (os.path.isfile(path) and bundlerepo or localrepo)
60 61
61 62 def addbranchrevs(lrepo, other, branches, revs):
62 63 peer = other.peer() # a courtesy to callers using a localrepo for other
63 64 hashbranch, branches = branches
64 65 if not hashbranch and not branches:
65 66 x = revs or None
66 67 if revs:
67 68 y = revs[0]
68 69 else:
69 70 y = None
70 71 return x, y
71 72 if revs:
72 73 revs = list(revs)
73 74 else:
74 75 revs = []
75 76
76 77 if not peer.capable('branchmap'):
77 78 if branches:
78 79 raise error.Abort(_("remote branch lookup not supported"))
79 80 revs.append(hashbranch)
80 81 return revs, revs[0]
81 82
82 83 with peer.commandexecutor() as e:
83 84 branchmap = e.callcommand('branchmap', {}).result()
84 85
85 86 def primary(branch):
86 87 if branch == '.':
87 88 if not lrepo:
88 89 raise error.Abort(_("dirstate branch not accessible"))
89 90 branch = lrepo.dirstate.branch()
90 91 if branch in branchmap:
91 92 revs.extend(node.hex(r) for r in reversed(branchmap[branch]))
92 93 return True
93 94 else:
94 95 return False
95 96
96 97 for branch in branches:
97 98 if not primary(branch):
98 99 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
99 100 if hashbranch:
100 101 if not primary(hashbranch):
101 102 revs.append(hashbranch)
102 103 return revs, revs[0]
103 104
104 105 def parseurl(path, branches=None):
105 106 '''parse url#branch, returning (url, (branch, branches))'''
106 107
107 108 u = util.url(path)
108 109 branch = None
109 110 if u.fragment:
110 111 branch = u.fragment
111 112 u.fragment = None
112 113 return bytes(u), (branch, branches or [])
113 114
114 115 schemes = {
115 116 'bundle': bundlerepo,
116 117 'union': unionrepo,
117 118 'file': _local,
118 119 'http': httppeer,
119 120 'https': httppeer,
120 121 'ssh': sshpeer,
121 122 'static-http': statichttprepo,
122 123 }
123 124
124 125 def _peerlookup(path):
125 126 u = util.url(path)
126 127 scheme = u.scheme or 'file'
127 128 thing = schemes.get(scheme) or schemes['file']
128 129 try:
129 130 return thing(path)
130 131 except TypeError:
131 132 # we can't test callable(thing) because 'thing' can be an unloaded
132 133 # module that implements __call__
133 134 if not util.safehasattr(thing, 'instance'):
134 135 raise
135 136 return thing
136 137
137 138 def islocal(repo):
138 139 '''return true if repo (or path pointing to repo) is local'''
139 140 if isinstance(repo, bytes):
140 141 try:
141 142 return _peerlookup(repo).islocal(repo)
142 143 except AttributeError:
143 144 return False
144 145 return repo.local()
145 146
146 147 def openpath(ui, path):
147 148 '''open path with open if local, url.open if remote'''
148 149 pathurl = util.url(path, parsequery=False, parsefragment=False)
149 150 if pathurl.islocal():
150 151 return util.posixfile(pathurl.localpath(), 'rb')
151 152 else:
152 153 return url.open(ui, path)
153 154
154 155 # a list of (ui, repo) functions called for wire peer initialization
155 156 wirepeersetupfuncs = []
156 157
157 158 def _peerorrepo(ui, path, create=False, presetupfuncs=None,
158 159 intents=None, createopts=None):
159 160 """return a repository object for the specified path"""
160 161 obj = _peerlookup(path).instance(ui, path, create, intents=intents,
161 162 createopts=createopts)
162 163 ui = getattr(obj, "ui", ui)
163 164 for f in presetupfuncs or []:
164 165 f(ui, obj)
165 166 ui.log(b'extension', b'- executing reposetup hooks\n')
166 167 with util.timedcm('all reposetup') as allreposetupstats:
167 168 for name, module in extensions.extensions(ui):
168 169 ui.log(b'extension', b' - running reposetup for %s\n', name)
169 170 hook = getattr(module, 'reposetup', None)
170 171 if hook:
171 172 with util.timedcm('reposetup %r', name) as stats:
172 173 hook(ui, obj)
173 174 ui.log(b'extension', b' > reposetup for %s took %s\n',
174 175 name, stats)
175 176 ui.log(b'extension', b'> all reposetup took %s\n', allreposetupstats)
176 177 if not obj.local():
177 178 for f in wirepeersetupfuncs:
178 179 f(ui, obj)
179 180 return obj
180 181
181 182 def repository(ui, path='', create=False, presetupfuncs=None, intents=None,
182 183 createopts=None):
183 184 """return a repository object for the specified path"""
184 185 peer = _peerorrepo(ui, path, create, presetupfuncs=presetupfuncs,
185 186 intents=intents, createopts=createopts)
186 187 repo = peer.local()
187 188 if not repo:
188 189 raise error.Abort(_("repository '%s' is not local") %
189 190 (path or peer.url()))
190 191 return repo.filtered('visible')
191 192
192 193 def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None):
193 194 '''return a repository peer for the specified path'''
194 195 rui = remoteui(uiorrepo, opts)
195 196 return _peerorrepo(rui, path, create, intents=intents,
196 197 createopts=createopts).peer()
197 198
198 199 def defaultdest(source):
199 200 '''return default destination of clone if none is given
200 201
201 202 >>> defaultdest(b'foo')
202 203 'foo'
203 204 >>> defaultdest(b'/foo/bar')
204 205 'bar'
205 206 >>> defaultdest(b'/')
206 207 ''
207 208 >>> defaultdest(b'')
208 209 ''
209 210 >>> defaultdest(b'http://example.org/')
210 211 ''
211 212 >>> defaultdest(b'http://example.org/foo/')
212 213 'foo'
213 214 '''
214 215 path = util.url(source).path
215 216 if not path:
216 217 return ''
217 218 return os.path.basename(os.path.normpath(path))
218 219
219 220 def sharedreposource(repo):
220 221 """Returns repository object for source repository of a shared repo.
221 222
222 223 If repo is not a shared repository, returns None.
223 224 """
224 225 if repo.sharedpath == repo.path:
225 226 return None
226 227
227 228 if util.safehasattr(repo, 'srcrepo') and repo.srcrepo:
228 229 return repo.srcrepo
229 230
230 231 # the sharedpath always ends in the .hg; we want the path to the repo
231 232 source = repo.vfs.split(repo.sharedpath)[0]
232 233 srcurl, branches = parseurl(source)
233 234 srcrepo = repository(repo.ui, srcurl)
234 235 repo.srcrepo = srcrepo
235 236 return srcrepo
236 237
237 238 def share(ui, source, dest=None, update=True, bookmarks=True, defaultpath=None,
238 239 relative=False):
239 240 '''create a shared repository'''
240 241
241 242 if not islocal(source):
242 243 raise error.Abort(_('can only share local repositories'))
243 244
244 245 if not dest:
245 246 dest = defaultdest(source)
246 247 else:
247 248 dest = ui.expandpath(dest)
248 249
249 250 if isinstance(source, bytes):
250 251 origsource = ui.expandpath(source)
251 252 source, branches = parseurl(origsource)
252 253 srcrepo = repository(ui, source)
253 254 rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
254 255 else:
255 256 srcrepo = source.local()
256 257 checkout = None
257 258
258 259 shareditems = set()
259 260 if bookmarks:
260 261 shareditems.add(sharedbookmarks)
261 262
262 263 r = repository(ui, dest, create=True, createopts={
263 264 'sharedrepo': srcrepo,
264 265 'sharedrelative': relative,
265 266 'shareditems': shareditems,
266 267 })
267 268
268 269 postshare(srcrepo, r, defaultpath=defaultpath)
269 270 r = repository(ui, dest)
270 271 _postshareupdate(r, update, checkout=checkout)
271 272 return r
272 273
273 274 def unshare(ui, repo):
274 275 """convert a shared repository to a normal one
275 276
276 277 Copy the store data to the repo and remove the sharedpath data.
277 278
278 279 Returns a new repository object representing the unshared repository.
279 280
280 281 The passed repository object is not usable after this function is
281 282 called.
282 283 """
283 284
284 285 destlock = lock = None
285 286 lock = repo.lock()
286 287 try:
287 288 # we use locks here because if we race with commit, we
288 289 # can end up with extra data in the cloned revlogs that's
289 290 # not pointed to by changesets, thus causing verify to
290 291 # fail
291 292
292 293 destlock = copystore(ui, repo, repo.path)
293 294
294 295 sharefile = repo.vfs.join('sharedpath')
295 296 util.rename(sharefile, sharefile + '.old')
296 297
297 298 repo.requirements.discard('shared')
298 299 repo.requirements.discard('relshared')
299 300 repo._writerequirements()
300 301 finally:
301 302 destlock and destlock.release()
302 303 lock and lock.release()
303 304
304 305 # Removing share changes some fundamental properties of the repo instance.
305 306 # So we instantiate a new repo object and operate on it rather than
306 307 # try to keep the existing repo usable.
307 308 newrepo = repository(repo.baseui, repo.root, create=False)
308 309
309 310 # TODO: figure out how to access subrepos that exist, but were previously
310 311 # removed from .hgsub
311 312 c = newrepo['.']
312 313 subs = c.substate
313 314 for s in sorted(subs):
314 315 c.sub(s).unshare()
315 316
316 317 localrepo.poisonrepository(repo)
317 318
318 319 return newrepo
319 320
320 321 def postshare(sourcerepo, destrepo, defaultpath=None):
321 322 """Called after a new shared repo is created.
322 323
323 324 The new repo only has a requirements file and pointer to the source.
324 325 This function configures additional shared data.
325 326
326 327 Extensions can wrap this function and write additional entries to
327 328 destrepo/.hg/shared to indicate additional pieces of data to be shared.
328 329 """
329 330 default = defaultpath or sourcerepo.ui.config('paths', 'default')
330 331 if default:
331 332 template = ('[paths]\n'
332 333 'default = %s\n')
333 334 destrepo.vfs.write('hgrc', util.tonativeeol(template % default))
335 if repositorymod.NARROW_REQUIREMENT in sourcerepo.requirements:
336 with destrepo.wlock():
337 narrowspec.copytoworkingcopy(destrepo, None)
334 338
335 339 def _postshareupdate(repo, update, checkout=None):
336 340 """Maybe perform a working directory update after a shared repo is created.
337 341
338 342 ``update`` can be a boolean or a revision to update to.
339 343 """
340 344 if not update:
341 345 return
342 346
343 347 repo.ui.status(_("updating working directory\n"))
344 348 if update is not True:
345 349 checkout = update
346 350 for test in (checkout, 'default', 'tip'):
347 351 if test is None:
348 352 continue
349 353 try:
350 354 uprev = repo.lookup(test)
351 355 break
352 356 except error.RepoLookupError:
353 357 continue
354 358 _update(repo, uprev)
355 359
356 360 def copystore(ui, srcrepo, destpath):
357 361 '''copy files from store of srcrepo in destpath
358 362
359 363 returns destlock
360 364 '''
361 365 destlock = None
362 366 try:
363 367 hardlink = None
364 368 topic = _('linking') if hardlink else _('copying')
365 369 with ui.makeprogress(topic, unit=_('files')) as progress:
366 370 num = 0
367 371 srcpublishing = srcrepo.publishing()
368 372 srcvfs = vfsmod.vfs(srcrepo.sharedpath)
369 373 dstvfs = vfsmod.vfs(destpath)
370 374 for f in srcrepo.store.copylist():
371 375 if srcpublishing and f.endswith('phaseroots'):
372 376 continue
373 377 dstbase = os.path.dirname(f)
374 378 if dstbase and not dstvfs.exists(dstbase):
375 379 dstvfs.mkdir(dstbase)
376 380 if srcvfs.exists(f):
377 381 if f.endswith('data'):
378 382 # 'dstbase' may be empty (e.g. revlog format 0)
379 383 lockfile = os.path.join(dstbase, "lock")
380 384 # lock to avoid premature writing to the target
381 385 destlock = lock.lock(dstvfs, lockfile)
382 386 hardlink, n = util.copyfiles(srcvfs.join(f), dstvfs.join(f),
383 387 hardlink, progress)
384 388 num += n
385 389 if hardlink:
386 390 ui.debug("linked %d files\n" % num)
387 391 else:
388 392 ui.debug("copied %d files\n" % num)
389 393 return destlock
390 394 except: # re-raises
391 395 release(destlock)
392 396 raise
393 397
394 398 def clonewithshare(ui, peeropts, sharepath, source, srcpeer, dest, pull=False,
395 399 rev=None, update=True, stream=False):
396 400 """Perform a clone using a shared repo.
397 401
398 402 The store for the repository will be located at <sharepath>/.hg. The
399 403 specified revisions will be cloned or pulled from "source". A shared repo
400 404 will be created at "dest" and a working copy will be created if "update" is
401 405 True.
402 406 """
403 407 revs = None
404 408 if rev:
405 409 if not srcpeer.capable('lookup'):
406 410 raise error.Abort(_("src repository does not support "
407 411 "revision lookup and so doesn't "
408 412 "support clone by revision"))
409 413
410 414 # TODO this is batchable.
411 415 remoterevs = []
412 416 for r in rev:
413 417 with srcpeer.commandexecutor() as e:
414 418 remoterevs.append(e.callcommand('lookup', {
415 419 'key': r,
416 420 }).result())
417 421 revs = remoterevs
418 422
419 423 # Obtain a lock before checking for or cloning the pooled repo otherwise
420 424 # 2 clients may race creating or populating it.
421 425 pooldir = os.path.dirname(sharepath)
422 426 # lock class requires the directory to exist.
423 427 try:
424 428 util.makedir(pooldir, False)
425 429 except OSError as e:
426 430 if e.errno != errno.EEXIST:
427 431 raise
428 432
429 433 poolvfs = vfsmod.vfs(pooldir)
430 434 basename = os.path.basename(sharepath)
431 435
432 436 with lock.lock(poolvfs, '%s.lock' % basename):
433 437 if os.path.exists(sharepath):
434 438 ui.status(_('(sharing from existing pooled repository %s)\n') %
435 439 basename)
436 440 else:
437 441 ui.status(_('(sharing from new pooled repository %s)\n') % basename)
438 442 # Always use pull mode because hardlinks in share mode don't work
439 443 # well. Never update because working copies aren't necessary in
440 444 # share mode.
441 445 clone(ui, peeropts, source, dest=sharepath, pull=True,
442 446 revs=rev, update=False, stream=stream)
443 447
444 448 # Resolve the value to put in [paths] section for the source.
445 449 if islocal(source):
446 450 defaultpath = os.path.abspath(util.urllocalpath(source))
447 451 else:
448 452 defaultpath = source
449 453
450 454 sharerepo = repository(ui, path=sharepath)
451 455 destrepo = share(ui, sharerepo, dest=dest, update=False, bookmarks=False,
452 456 defaultpath=defaultpath)
453 457
454 458 # We need to perform a pull against the dest repo to fetch bookmarks
455 459 # and other non-store data that isn't shared by default. In the case of
456 460 # non-existing shared repo, this means we pull from the remote twice. This
457 461 # is a bit weird. But at the time it was implemented, there wasn't an easy
458 462 # way to pull just non-changegroup data.
459 463 exchange.pull(destrepo, srcpeer, heads=revs)
460 464
461 465 _postshareupdate(destrepo, update)
462 466
463 467 return srcpeer, peer(ui, peeropts, dest)
464 468
465 469 # Recomputing branch cache might be slow on big repos,
466 470 # so just copy it
467 471 def _copycache(srcrepo, dstcachedir, fname):
468 472 """copy a cache from srcrepo to destcachedir (if it exists)"""
469 473 srcbranchcache = srcrepo.vfs.join('cache/%s' % fname)
470 474 dstbranchcache = os.path.join(dstcachedir, fname)
471 475 if os.path.exists(srcbranchcache):
472 476 if not os.path.exists(dstcachedir):
473 477 os.mkdir(dstcachedir)
474 478 util.copyfile(srcbranchcache, dstbranchcache)
475 479
476 480 def clone(ui, peeropts, source, dest=None, pull=False, revs=None,
477 481 update=True, stream=False, branch=None, shareopts=None,
478 482 storeincludepats=None, storeexcludepats=None, depth=None):
479 483 """Make a copy of an existing repository.
480 484
481 485 Create a copy of an existing repository in a new directory. The
482 486 source and destination are URLs, as passed to the repository
483 487 function. Returns a pair of repository peers, the source and
484 488 newly created destination.
485 489
486 490 The location of the source is added to the new repository's
487 491 .hg/hgrc file, as the default to be used for future pulls and
488 492 pushes.
489 493
490 494 If an exception is raised, the partly cloned/updated destination
491 495 repository will be deleted.
492 496
493 497 Arguments:
494 498
495 499 source: repository object or URL
496 500
497 501 dest: URL of destination repository to create (defaults to base
498 502 name of source repository)
499 503
500 504 pull: always pull from source repository, even in local case or if the
501 505 server prefers streaming
502 506
503 507 stream: stream raw data uncompressed from repository (fast over
504 508 LAN, slow over WAN)
505 509
506 510 revs: revision to clone up to (implies pull=True)
507 511
508 512 update: update working directory after clone completes, if
509 513 destination is local repository (True means update to default rev,
510 514 anything else is treated as a revision)
511 515
512 516 branch: branches to clone
513 517
514 518 shareopts: dict of options to control auto sharing behavior. The "pool" key
515 519 activates auto sharing mode and defines the directory for stores. The
516 520 "mode" key determines how to construct the directory name of the shared
517 521 repository. "identity" means the name is derived from the node of the first
518 522 changeset in the repository. "remote" means the name is derived from the
519 523 remote's path/URL. Defaults to "identity."
520 524
521 525 storeincludepats and storeexcludepats: sets of file patterns to include and
522 526 exclude in the repository copy, respectively. If not defined, all files
523 527 will be included (a "full" clone). Otherwise a "narrow" clone containing
524 528 only the requested files will be performed. If ``storeincludepats`` is not
525 529 defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
526 530 ``path:.``. If both are empty sets, no files will be cloned.
527 531 """
528 532
529 533 if isinstance(source, bytes):
530 534 origsource = ui.expandpath(source)
531 535 source, branches = parseurl(origsource, branch)
532 536 srcpeer = peer(ui, peeropts, source)
533 537 else:
534 538 srcpeer = source.peer() # in case we were called with a localrepo
535 539 branches = (None, branch or [])
536 540 origsource = source = srcpeer.url()
537 541 revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)
538 542
539 543 if dest is None:
540 544 dest = defaultdest(source)
541 545 if dest:
542 546 ui.status(_("destination directory: %s\n") % dest)
543 547 else:
544 548 dest = ui.expandpath(dest)
545 549
546 550 dest = util.urllocalpath(dest)
547 551 source = util.urllocalpath(source)
548 552
549 553 if not dest:
550 554 raise error.Abort(_("empty destination path is not valid"))
551 555
552 556 destvfs = vfsmod.vfs(dest, expandpath=True)
553 557 if destvfs.lexists():
554 558 if not destvfs.isdir():
555 559 raise error.Abort(_("destination '%s' already exists") % dest)
556 560 elif destvfs.listdir():
557 561 raise error.Abort(_("destination '%s' is not empty") % dest)
558 562
559 563 createopts = {}
560 564 narrow = False
561 565
562 566 if storeincludepats is not None:
563 567 narrowspec.validatepatterns(storeincludepats)
564 568 narrow = True
565 569
566 570 if storeexcludepats is not None:
567 571 narrowspec.validatepatterns(storeexcludepats)
568 572 narrow = True
569 573
570 574 if narrow:
571 575 # Include everything by default if only exclusion patterns defined.
572 576 if storeexcludepats and not storeincludepats:
573 577 storeincludepats = {'path:.'}
574 578
575 579 createopts['narrowfiles'] = True
576 580
577 581 if depth:
578 582 createopts['shallowfilestore'] = True
579 583
580 584 if srcpeer.capable(b'lfs-serve'):
581 585 # Repository creation honors the config if it disabled the extension, so
582 586 # we can't just announce that lfs will be enabled. This check avoids
583 587 # saying that lfs will be enabled, and then saying it's an unknown
584 588 # feature. The lfs creation option is set in either case so that a
585 589 # requirement is added. If the extension is explicitly disabled but the
586 590 # requirement is set, the clone aborts early, before transferring any
587 591 # data.
588 592 createopts['lfs'] = True
589 593
590 594 if extensions.disabledext('lfs'):
591 595 ui.status(_('(remote is using large file support (lfs), but it is '
592 596 'explicitly disabled in the local configuration)\n'))
593 597 else:
594 598 ui.status(_('(remote is using large file support (lfs); lfs will '
595 599 'be enabled for this repository)\n'))
596 600
597 601 shareopts = shareopts or {}
598 602 sharepool = shareopts.get('pool')
599 603 sharenamemode = shareopts.get('mode')
600 604 if sharepool and islocal(dest):
601 605 sharepath = None
602 606 if sharenamemode == 'identity':
603 607 # Resolve the name from the initial changeset in the remote
604 608 # repository. This returns nullid when the remote is empty. It
605 609 # raises RepoLookupError if revision 0 is filtered or otherwise
606 610 # not available. If we fail to resolve, sharing is not enabled.
607 611 try:
608 612 with srcpeer.commandexecutor() as e:
609 613 rootnode = e.callcommand('lookup', {
610 614 'key': '0',
611 615 }).result()
612 616
613 617 if rootnode != node.nullid:
614 618 sharepath = os.path.join(sharepool, node.hex(rootnode))
615 619 else:
616 620 ui.status(_('(not using pooled storage: '
617 621 'remote appears to be empty)\n'))
618 622 except error.RepoLookupError:
619 623 ui.status(_('(not using pooled storage: '
620 624 'unable to resolve identity of remote)\n'))
621 625 elif sharenamemode == 'remote':
622 626 sharepath = os.path.join(
623 627 sharepool, node.hex(hashlib.sha1(source).digest()))
624 628 else:
625 629 raise error.Abort(_('unknown share naming mode: %s') %
626 630 sharenamemode)
627 631
628 632 # TODO this is a somewhat arbitrary restriction.
629 633 if narrow:
630 634 ui.status(_('(pooled storage not supported for narrow clones)\n'))
631 635 sharepath = None
632 636
633 637 if sharepath:
634 638 return clonewithshare(ui, peeropts, sharepath, source, srcpeer,
635 639 dest, pull=pull, rev=revs, update=update,
636 640 stream=stream)
637 641
638 642 srclock = destlock = cleandir = None
639 643 srcrepo = srcpeer.local()
640 644 try:
641 645 abspath = origsource
642 646 if islocal(origsource):
643 647 abspath = os.path.abspath(util.urllocalpath(origsource))
644 648
645 649 if islocal(dest):
646 650 cleandir = dest
647 651
648 652 copy = False
649 653 if (srcrepo and srcrepo.cancopy() and islocal(dest)
650 654 and not phases.hassecret(srcrepo)):
651 655 copy = not pull and not revs
652 656
653 657 # TODO this is a somewhat arbitrary restriction.
654 658 if narrow:
655 659 copy = False
656 660
657 661 if copy:
658 662 try:
659 663 # we use a lock here because if we race with commit, we
660 664 # can end up with extra data in the cloned revlogs that's
661 665 # not pointed to by changesets, thus causing verify to
662 666 # fail
663 667 srclock = srcrepo.lock(wait=False)
664 668 except error.LockError:
665 669 copy = False
666 670
667 671 if copy:
668 672 srcrepo.hook('preoutgoing', throw=True, source='clone')
669 673 hgdir = os.path.realpath(os.path.join(dest, ".hg"))
670 674 if not os.path.exists(dest):
671 675 util.makedirs(dest)
672 676 else:
673 677 # only clean up directories we create ourselves
674 678 cleandir = hgdir
675 679 try:
676 680 destpath = hgdir
677 681 util.makedir(destpath, notindexed=True)
678 682 except OSError as inst:
679 683 if inst.errno == errno.EEXIST:
680 684 cleandir = None
681 685 raise error.Abort(_("destination '%s' already exists")
682 686 % dest)
683 687 raise
684 688
685 689 destlock = copystore(ui, srcrepo, destpath)
686 690 # copy bookmarks over
687 691 srcbookmarks = srcrepo.vfs.join('bookmarks')
688 692 dstbookmarks = os.path.join(destpath, 'bookmarks')
689 693 if os.path.exists(srcbookmarks):
690 694 util.copyfile(srcbookmarks, dstbookmarks)
691 695
692 696 dstcachedir = os.path.join(destpath, 'cache')
693 697 for cache in cacheutil.cachetocopy(srcrepo):
694 698 _copycache(srcrepo, dstcachedir, cache)
695 699
696 700 # we need to re-init the repo after manually copying the data
697 701 # into it
698 702 destpeer = peer(srcrepo, peeropts, dest)
699 703 srcrepo.hook('outgoing', source='clone',
700 704 node=node.hex(node.nullid))
701 705 else:
702 706 try:
703 707 # only pass ui when no srcrepo
704 708 destpeer = peer(srcrepo or ui, peeropts, dest, create=True,
705 709 createopts=createopts)
706 710 except OSError as inst:
707 711 if inst.errno == errno.EEXIST:
708 712 cleandir = None
709 713 raise error.Abort(_("destination '%s' already exists")
710 714 % dest)
711 715 raise
712 716
713 717 if revs:
714 718 if not srcpeer.capable('lookup'):
715 719 raise error.Abort(_("src repository does not support "
716 720 "revision lookup and so doesn't "
717 721 "support clone by revision"))
718 722
719 723 # TODO this is batchable.
720 724 remoterevs = []
721 725 for rev in revs:
722 726 with srcpeer.commandexecutor() as e:
723 727 remoterevs.append(e.callcommand('lookup', {
724 728 'key': rev,
725 729 }).result())
726 730 revs = remoterevs
727 731
728 732 checkout = revs[0]
729 733 else:
730 734 revs = None
731 735 local = destpeer.local()
732 736 if local:
733 737 if narrow:
734 with local.lock():
738 with local.wlock(), local.lock():
735 739 local.setnarrowpats(storeincludepats, storeexcludepats)
736 740
737 741 u = util.url(abspath)
738 742 defaulturl = bytes(u)
739 743 local.ui.setconfig('paths', 'default', defaulturl, 'clone')
740 744 if not stream:
741 745 if pull:
742 746 stream = False
743 747 else:
744 748 stream = None
745 749 # internal config: ui.quietbookmarkmove
746 750 overrides = {('ui', 'quietbookmarkmove'): True}
747 751 with local.ui.configoverride(overrides, 'clone'):
748 752 exchange.pull(local, srcpeer, revs,
749 753 streamclonerequested=stream,
750 754 includepats=storeincludepats,
751 755 excludepats=storeexcludepats,
752 756 depth=depth)
753 757 elif srcrepo:
754 758 # TODO lift restriction once exchange.push() accepts narrow
755 759 # push.
756 760 if narrow:
757 761 raise error.Abort(_('narrow clone not available for '
758 762 'remote destinations'))
759 763
760 764 exchange.push(srcrepo, destpeer, revs=revs,
761 765 bookmarks=srcrepo._bookmarks.keys())
762 766 else:
763 767 raise error.Abort(_("clone from remote to remote not supported")
764 768 )
765 769
766 770 cleandir = None
767 771
768 772 destrepo = destpeer.local()
769 773 if destrepo:
770 774 template = uimod.samplehgrcs['cloned']
771 775 u = util.url(abspath)
772 776 u.passwd = None
773 777 defaulturl = bytes(u)
774 778 destrepo.vfs.write('hgrc', util.tonativeeol(template % defaulturl))
775 779 destrepo.ui.setconfig('paths', 'default', defaulturl, 'clone')
776 780
777 781 if ui.configbool('experimental', 'remotenames'):
778 782 logexchange.pullremotenames(destrepo, srcpeer)
779 783
780 784 if update:
781 785 if update is not True:
782 786 with srcpeer.commandexecutor() as e:
783 787 checkout = e.callcommand('lookup', {
784 788 'key': update,
785 789 }).result()
786 790
787 791 uprev = None
788 792 status = None
789 793 if checkout is not None:
790 794 # Some extensions (at least hg-git and hg-subversion) have
791 795 # a peer.lookup() implementation that returns a name instead
792 796 # of a nodeid. We work around it here until we've figured
793 797 # out a better solution.
794 798 if len(checkout) == 20 and checkout in destrepo:
795 799 uprev = checkout
796 800 elif scmutil.isrevsymbol(destrepo, checkout):
797 801 uprev = scmutil.revsymbol(destrepo, checkout).node()
798 802 else:
799 803 if update is not True:
800 804 try:
801 805 uprev = destrepo.lookup(update)
802 806 except error.RepoLookupError:
803 807 pass
804 808 if uprev is None:
805 809 try:
806 810 uprev = destrepo._bookmarks['@']
807 811 update = '@'
808 812 bn = destrepo[uprev].branch()
809 813 if bn == 'default':
810 814 status = _("updating to bookmark @\n")
811 815 else:
812 816 status = (_("updating to bookmark @ on branch %s\n")
813 817 % bn)
814 818 except KeyError:
815 819 try:
816 820 uprev = destrepo.branchtip('default')
817 821 except error.RepoLookupError:
818 822 uprev = destrepo.lookup('tip')
819 823 if not status:
820 824 bn = destrepo[uprev].branch()
821 825 status = _("updating to branch %s\n") % bn
822 826 destrepo.ui.status(status)
823 827 _update(destrepo, uprev)
824 828 if update in destrepo._bookmarks:
825 829 bookmarks.activate(destrepo, update)
826 830 finally:
827 831 release(srclock, destlock)
828 832 if cleandir is not None:
829 833 shutil.rmtree(cleandir, True)
830 834 if srcpeer is not None:
831 835 srcpeer.close()
832 836 return srcpeer, destpeer
833 837
834 838 def _showstats(repo, stats, quietempty=False):
835 839 if quietempty and stats.isempty():
836 840 return
837 841 repo.ui.status(_("%d files updated, %d files merged, "
838 842 "%d files removed, %d files unresolved\n") % (
839 843 stats.updatedcount, stats.mergedcount,
840 844 stats.removedcount, stats.unresolvedcount))
841 845
842 846 def updaterepo(repo, node, overwrite, updatecheck=None):
843 847 """Update the working directory to node.
844 848
845 849 When overwrite is set, changes are clobbered; otherwise they are merged.
846 850
847 851 returns stats (see pydoc mercurial.merge.applyupdates)"""
848 852 return mergemod.update(repo, node, branchmerge=False, force=overwrite,
849 853 labels=['working copy', 'destination'],
850 854 updatecheck=updatecheck)
851 855
852 856 def update(repo, node, quietempty=False, updatecheck=None):
853 857 """update the working directory to node"""
854 858 stats = updaterepo(repo, node, False, updatecheck=updatecheck)
855 859 _showstats(repo, stats, quietempty)
856 860 if stats.unresolvedcount:
857 861 repo.ui.status(_("use 'hg resolve' to retry unresolved file merges\n"))
858 862 return stats.unresolvedcount > 0
859 863
860 864 # naming conflict in clone()
861 865 _update = update
862 866
863 867 def clean(repo, node, show_stats=True, quietempty=False):
864 868 """forcibly switch the working directory to node, clobbering changes"""
865 869 stats = updaterepo(repo, node, True)
866 870 repo.vfs.unlinkpath('graftstate', ignoremissing=True)
867 871 if show_stats:
868 872 _showstats(repo, stats, quietempty)
869 873 return stats.unresolvedcount > 0
870 874
871 875 # naming conflict in updatetotally()
872 876 _clean = clean
873 877
874 878 def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
875 879 """Update the working directory with extra care for non-file components
876 880
877 881 This takes care of non-file components below:
878 882
879 883 :bookmark: might be advanced or (in)activated
880 884
881 885 This takes arguments below:
882 886
883 887 :checkout: to which revision the working directory is updated
884 888 :brev: a name, which might be a bookmark to be activated after updating
885 889 :clean: whether changes in the working directory can be discarded
886 890 :updatecheck: how to deal with a dirty working directory
887 891
888 892 Valid values for updatecheck are (None => linear):
889 893
890 894 * abort: abort if the working directory is dirty
891 895 * none: don't check (merge working directory changes into destination)
892 896 * linear: check that update is linear before merging working directory
893 897 changes into destination
894 898 * noconflict: check that the update does not result in file merges
895 899
896 900 This returns whether conflict is detected at updating or not.
897 901 """
898 902 if updatecheck is None:
899 903 updatecheck = ui.config('commands', 'update.check')
900 904 if updatecheck not in ('abort', 'none', 'linear', 'noconflict'):
901 905 # If not configured, or invalid value configured
902 906 updatecheck = 'linear'
903 907 with repo.wlock():
904 908 movemarkfrom = None
905 909 warndest = False
906 910 if checkout is None:
907 911 updata = destutil.destupdate(repo, clean=clean)
908 912 checkout, movemarkfrom, brev = updata
909 913 warndest = True
910 914
911 915 if clean:
912 916 ret = _clean(repo, checkout)
913 917 else:
914 918 if updatecheck == 'abort':
915 919 cmdutil.bailifchanged(repo, merge=False)
916 920 updatecheck = 'none'
917 921 ret = _update(repo, checkout, updatecheck=updatecheck)
918 922
919 923 if not ret and movemarkfrom:
920 924 if movemarkfrom == repo['.'].node():
921 925 pass # no-op update
922 926 elif bookmarks.update(repo, [movemarkfrom], repo['.'].node()):
923 927 b = ui.label(repo._activebookmark, 'bookmarks.active')
924 928 ui.status(_("updating bookmark %s\n") % b)
925 929 else:
926 930 # this can happen with a non-linear update
927 931 b = ui.label(repo._activebookmark, 'bookmarks')
928 932 ui.status(_("(leaving bookmark %s)\n") % b)
929 933 bookmarks.deactivate(repo)
930 934 elif brev in repo._bookmarks:
931 935 if brev != repo._activebookmark:
932 936 b = ui.label(brev, 'bookmarks.active')
933 937 ui.status(_("(activating bookmark %s)\n") % b)
934 938 bookmarks.activate(repo, brev)
935 939 elif brev:
936 940 if repo._activebookmark:
937 941 b = ui.label(repo._activebookmark, 'bookmarks')
938 942 ui.status(_("(leaving bookmark %s)\n") % b)
939 943 bookmarks.deactivate(repo)
940 944
941 945 if warndest:
942 946 destutil.statusotherdests(ui, repo)
943 947
944 948 return ret
945 949
946 950 def merge(repo, node, force=None, remind=True, mergeforce=False, labels=None,
947 951 abort=False):
948 952 """Branch merge with node, resolving changes. Return true if any
949 953 unresolved conflicts."""
950 954 if not abort:
951 955 stats = mergemod.update(repo, node, branchmerge=True, force=force,
952 956 mergeforce=mergeforce, labels=labels)
953 957 else:
954 958 ms = mergemod.mergestate.read(repo)
955 959 if ms.active():
956 960 # there were conflicts
957 961 node = ms.localctx.hex()
958 962 else:
959 963 # there were no conflicts, mergestate was not stored
960 964 node = repo['.'].hex()
961 965
962 966 repo.ui.status(_("aborting the merge, updating back to"
963 967 " %s\n") % node[:12])
964 968 stats = mergemod.update(repo, node, branchmerge=False, force=True,
965 969 labels=labels)
966 970
967 971 _showstats(repo, stats)
968 972 if stats.unresolvedcount:
969 973 repo.ui.status(_("use 'hg resolve' to retry unresolved file merges "
970 974 "or 'hg merge --abort' to abandon\n"))
971 975 elif remind and not abort:
972 976 repo.ui.status(_("(branch merge, don't forget to commit)\n"))
973 977 return stats.unresolvedcount > 0
974 978
975 979 def _incoming(displaychlist, subreporecurse, ui, repo, source,
976 980 opts, buffered=False):
977 981 """
978 982 Helper for incoming / gincoming.
979 983 displaychlist gets called with
980 984 (remoterepo, incomingchangesetlist, displayer) parameters,
981 985 and is supposed to contain only code that can't be unified.
982 986 """
983 987 source, branches = parseurl(ui.expandpath(source), opts.get('branch'))
984 988 other = peer(repo, opts, source)
985 989 ui.status(_('comparing with %s\n') % util.hidepassword(source))
986 990 revs, checkout = addbranchrevs(repo, other, branches, opts.get('rev'))
987 991
988 992 if revs:
989 993 revs = [other.lookup(rev) for rev in revs]
990 994 other, chlist, cleanupfn = bundlerepo.getremotechanges(ui, repo, other,
991 995 revs, opts["bundle"], opts["force"])
992 996 try:
993 997 if not chlist:
994 998 ui.status(_("no changes found\n"))
995 999 return subreporecurse()
996 1000 ui.pager('incoming')
997 1001 displayer = logcmdutil.changesetdisplayer(ui, other, opts,
998 1002 buffered=buffered)
999 1003 displaychlist(other, chlist, displayer)
1000 1004 displayer.close()
1001 1005 finally:
1002 1006 cleanupfn()
1003 1007 subreporecurse()
1004 1008 return 0 # exit code is zero since we found incoming changes
1005 1009
1006 1010 def incoming(ui, repo, source, opts):
1007 1011 def subreporecurse():
1008 1012 ret = 1
1009 1013 if opts.get('subrepos'):
1010 1014 ctx = repo[None]
1011 1015 for subpath in sorted(ctx.substate):
1012 1016 sub = ctx.sub(subpath)
1013 1017 ret = min(ret, sub.incoming(ui, source, opts))
1014 1018 return ret
1015 1019
1016 1020 def display(other, chlist, displayer):
1017 1021 limit = logcmdutil.getlimit(opts)
1018 1022 if opts.get('newest_first'):
1019 1023 chlist.reverse()
1020 1024 count = 0
1021 1025 for n in chlist:
1022 1026 if limit is not None and count >= limit:
1023 1027 break
1024 1028 parents = [p for p in other.changelog.parents(n) if p != nullid]
1025 1029 if opts.get('no_merges') and len(parents) == 2:
1026 1030 continue
1027 1031 count += 1
1028 1032 displayer.show(other[n])
1029 1033 return _incoming(display, subreporecurse, ui, repo, source, opts)
1030 1034
1031 1035 def _outgoing(ui, repo, dest, opts):
1032 1036 path = ui.paths.getpath(dest, default=('default-push', 'default'))
1033 1037 if not path:
1034 1038 raise error.Abort(_('default repository not configured!'),
1035 1039 hint=_("see 'hg help config.paths'"))
1036 1040 dest = path.pushloc or path.loc
1037 1041 branches = path.branch, opts.get('branch') or []
1038 1042
1039 1043 ui.status(_('comparing with %s\n') % util.hidepassword(dest))
1040 1044 revs, checkout = addbranchrevs(repo, repo, branches, opts.get('rev'))
1041 1045 if revs:
1042 1046 revs = [repo[rev].node() for rev in scmutil.revrange(repo, revs)]
1043 1047
1044 1048 other = peer(repo, opts, dest)
1045 1049 outgoing = discovery.findcommonoutgoing(repo, other, revs,
1046 1050 force=opts.get('force'))
1047 1051 o = outgoing.missing
1048 1052 if not o:
1049 1053 scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
1050 1054 return o, other
1051 1055
1052 1056 def outgoing(ui, repo, dest, opts):
1053 1057 def recurse():
1054 1058 ret = 1
1055 1059 if opts.get('subrepos'):
1056 1060 ctx = repo[None]
1057 1061 for subpath in sorted(ctx.substate):
1058 1062 sub = ctx.sub(subpath)
1059 1063 ret = min(ret, sub.outgoing(ui, dest, opts))
1060 1064 return ret
1061 1065
1062 1066 limit = logcmdutil.getlimit(opts)
1063 1067 o, other = _outgoing(ui, repo, dest, opts)
1064 1068 if not o:
1065 1069 cmdutil.outgoinghooks(ui, repo, other, opts, o)
1066 1070 return recurse()
1067 1071
1068 1072 if opts.get('newest_first'):
1069 1073 o.reverse()
1070 1074 ui.pager('outgoing')
1071 1075 displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
1072 1076 count = 0
1073 1077 for n in o:
1074 1078 if limit is not None and count >= limit:
1075 1079 break
1076 1080 parents = [p for p in repo.changelog.parents(n) if p != nullid]
1077 1081 if opts.get('no_merges') and len(parents) == 2:
1078 1082 continue
1079 1083 count += 1
1080 1084 displayer.show(repo[n])
1081 1085 displayer.close()
1082 1086 cmdutil.outgoinghooks(ui, repo, other, opts, o)
1083 1087 recurse()
1084 1088 return 0 # exit code is zero since we found outgoing changes
1085 1089
1086 1090 def verify(repo):
1087 1091 """verify the consistency of a repository"""
1088 1092 ret = verifymod.verify(repo)
1089 1093
1090 1094 # Broken subrepo references in hidden csets don't seem worth worrying about,
1091 1095 # since they can't be pushed/pulled, and --hidden can be used if they are a
1092 1096 # concern.
1093 1097
1094 1098 # pathto() is needed for -R case
1095 1099 revs = repo.revs("filelog(%s)",
1096 1100 util.pathto(repo.root, repo.getcwd(), '.hgsubstate'))
1097 1101
1098 1102 if revs:
1099 1103 repo.ui.status(_('checking subrepo links\n'))
1100 1104 for rev in revs:
1101 1105 ctx = repo[rev]
1102 1106 try:
1103 1107 for subpath in ctx.substate:
1104 1108 try:
1105 1109 ret = (ctx.sub(subpath, allowcreate=False).verify()
1106 1110 or ret)
1107 1111 except error.RepoError as e:
1108 1112 repo.ui.warn(('%d: %s\n') % (rev, e))
1109 1113 except Exception:
1110 1114 repo.ui.warn(_('.hgsubstate is corrupt in revision %s\n') %
1111 1115 node.short(ctx.node()))
1112 1116
1113 1117 return ret
1114 1118
1115 1119 def remoteui(src, opts):
1116 1120 'build a remote ui from ui or repo and opts'
1117 1121 if util.safehasattr(src, 'baseui'): # looks like a repository
1118 1122 dst = src.baseui.copy() # drop repo-specific config
1119 1123 src = src.ui # copy target options from repo
1120 1124 else: # assume it's a global ui object
1121 1125 dst = src.copy() # keep all global options
1122 1126
1123 1127 # copy ssh-specific options
1124 1128 for o in 'ssh', 'remotecmd':
1125 1129 v = opts.get(o) or src.config('ui', o)
1126 1130 if v:
1127 1131 dst.setconfig("ui", o, v, 'copied')
1128 1132
1129 1133 # copy bundle-specific options
1130 1134 r = src.config('bundle', 'mainreporoot')
1131 1135 if r:
1132 1136 dst.setconfig('bundle', 'mainreporoot', r, 'copied')
1133 1137
1134 1138 # copy selected local settings to the remote ui
1135 1139 for sect in ('auth', 'hostfingerprints', 'hostsecurity', 'http_proxy'):
1136 1140 for key, val in src.configitems(sect):
1137 1141 dst.setconfig(sect, key, val, 'copied')
1138 1142 v = src.config('web', 'cacerts')
1139 1143 if v:
1140 1144 dst.setconfig('web', 'cacerts', util.expandpath(v), 'copied')
1141 1145
1142 1146 return dst
1143 1147
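As a rough illustration of the function above (the option values are hypothetical), remoteui derives a ui suitable for contacting a peer while honouring per-invocation ssh settings:

    # hedged sketch: build a remote-facing ui, overriding the ssh command
    rui = remoteui(repo, {'ssh': b'ssh -C'})
    assert rui.config('ui', 'ssh') == b'ssh -C'
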
1144 1148 # Files of interest
1145 1149 # Used to check if the repository has changed by looking at the mtime and size of
1146 1150 # these files.
1147 1151 foi = [('spath', '00changelog.i'),
1148 1152 ('spath', 'phaseroots'), # ! phase can change content at the same size
1149 1153 ('spath', 'obsstore'),
1150 1154 ('path', 'bookmarks'), # ! bookmark can change content at the same size
1151 1155 ]
1152 1156
1153 1157 class cachedlocalrepo(object):
1154 1158 """Holds a localrepository that can be cached and reused."""
1155 1159
1156 1160 def __init__(self, repo):
1157 1161 """Create a new cached repo from an existing repo.
1158 1162
1159 1163 We assume the passed in repo was recently created. If the
1160 1164 repo has changed between when it was created and when it was
1161 1165 turned into a cache, it may not refresh properly.
1162 1166 """
1163 1167 assert isinstance(repo, localrepo.localrepository)
1164 1168 self._repo = repo
1165 1169 self._state, self.mtime = self._repostate()
1166 1170 self._filtername = repo.filtername
1167 1171
1168 1172 def fetch(self):
1169 1173 """Refresh (if necessary) and return a repository.
1170 1174
1171 1175 If the cached instance is out of date, it will be recreated
1172 1176 automatically and returned.
1173 1177
1174 1178 Returns a tuple of the repo and a boolean indicating whether a new
1175 1179 repo instance was created.
1176 1180 """
1177 1181 # We compare the mtimes and sizes of some well-known files to
1178 1182 # determine if the repo changed. This is not precise, as mtimes
1179 1183 # are susceptible to clock skew and imprecise filesystems and
1180 1184 # file content can change while maintaining the same size.
1181 1185
1182 1186 state, mtime = self._repostate()
1183 1187 if state == self._state:
1184 1188 return self._repo, False
1185 1189
1186 1190 repo = repository(self._repo.baseui, self._repo.url())
1187 1191 if self._filtername:
1188 1192 self._repo = repo.filtered(self._filtername)
1189 1193 else:
1190 1194 self._repo = repo.unfiltered()
1191 1195 self._state = state
1192 1196 self.mtime = mtime
1193 1197
1194 1198 return self._repo, True
1195 1199
1196 1200 def _repostate(self):
1197 1201 state = []
1198 1202 maxmtime = -1
1199 1203 for attr, fname in foi:
1200 1204 prefix = getattr(self._repo, attr)
1201 1205 p = os.path.join(prefix, fname)
1202 1206 try:
1203 1207 st = os.stat(p)
1204 1208 except OSError:
1205 1209 st = os.stat(prefix)
1206 1210 state.append((st[stat.ST_MTIME], st.st_size))
1207 1211 maxmtime = max(maxmtime, st[stat.ST_MTIME])
1208 1212
1209 1213 return tuple(state), maxmtime
1210 1214
1211 1215 def copy(self):
1212 1216 """Obtain a copy of this class instance.
1213 1217
1214 1218 A new localrepository instance is obtained. The new instance should be
1215 1219 completely independent of the original.
1216 1220 """
1217 1221 repo = repository(self._repo.baseui, self._repo.origroot)
1218 1222 if self._filtername:
1219 1223 repo = repo.filtered(self._filtername)
1220 1224 else:
1221 1225 repo = repo.unfiltered()
1222 1226 c = cachedlocalrepo(repo)
1223 1227 c._state = self._state
1224 1228 c.mtime = self.mtime
1225 1229 return c
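
A hedged usage sketch of the cache class above; hgweb uses a similar pattern to avoid re-opening repositories on every request (the debug message is illustrative):

    cached = cachedlocalrepo(repo)
    # ... time passes; another process may have committed or stripped ...
    repo, fresh = cached.fetch()
    if fresh:
        repo.ui.debug(b'repo changed on disk; got a new instance\n')
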
@@ -1,3077 +1,3085 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import hashlib
12 12 import os
13 13 import random
14 14 import sys
15 15 import time
16 16 import weakref
17 17
18 18 from .i18n import _
19 19 from .node import (
20 20 bin,
21 21 hex,
22 22 nullid,
23 23 nullrev,
24 24 short,
25 25 )
26 26 from . import (
27 27 bookmarks,
28 28 branchmap,
29 29 bundle2,
30 30 changegroup,
31 31 changelog,
32 32 color,
33 33 context,
34 34 dirstate,
35 35 dirstateguard,
36 36 discovery,
37 37 encoding,
38 38 error,
39 39 exchange,
40 40 extensions,
41 41 filelog,
42 42 hook,
43 43 lock as lockmod,
44 44 manifest,
45 45 match as matchmod,
46 46 merge as mergemod,
47 47 mergeutil,
48 48 namespaces,
49 49 narrowspec,
50 50 obsolete,
51 51 pathutil,
52 52 phases,
53 53 pushkey,
54 54 pycompat,
55 55 repository,
56 56 repoview,
57 57 revset,
58 58 revsetlang,
59 59 scmutil,
60 60 sparse,
61 61 store as storemod,
62 62 subrepoutil,
63 63 tags as tagsmod,
64 64 transaction,
65 65 txnutil,
66 66 util,
67 67 vfs as vfsmod,
68 68 )
69 69 from .utils import (
70 70 interfaceutil,
71 71 procutil,
72 72 stringutil,
73 73 )
74 74
75 75 from .revlogutils import (
76 76 constants as revlogconst,
77 77 )
78 78
79 79 release = lockmod.release
80 80 urlerr = util.urlerr
81 81 urlreq = util.urlreq
82 82
83 83 # set of (path, vfs-location) tuples. vfs-location is:
84 84 # - 'plain' for vfs relative paths
85 85 # - '' for svfs relative paths
86 86 _cachedfiles = set()
87 87
88 88 class _basefilecache(scmutil.filecache):
89 89 """All filecache usage on repo are done for logic that should be unfiltered
90 90 """
91 91 def __get__(self, repo, type=None):
92 92 if repo is None:
93 93 return self
94 94 # proxy to unfiltered __dict__ since filtered repo has no entry
95 95 unfi = repo.unfiltered()
96 96 try:
97 97 return unfi.__dict__[self.sname]
98 98 except KeyError:
99 99 pass
100 100 return super(_basefilecache, self).__get__(unfi, type)
101 101
102 102 def set(self, repo, value):
103 103 return super(_basefilecache, self).set(repo.unfiltered(), value)
104 104
105 105 class repofilecache(_basefilecache):
106 106 """filecache for files in .hg but outside of .hg/store"""
107 107 def __init__(self, *paths):
108 108 super(repofilecache, self).__init__(*paths)
109 109 for path in paths:
110 110 _cachedfiles.add((path, 'plain'))
111 111
112 112 def join(self, obj, fname):
113 113 return obj.vfs.join(fname)
114 114
115 115 class storecache(_basefilecache):
116 116 """filecache for files in the store"""
117 117 def __init__(self, *paths):
118 118 super(storecache, self).__init__(*paths)
119 119 for path in paths:
120 120 _cachedfiles.add((path, ''))
121 121
122 122 def join(self, obj, fname):
123 123 return obj.sjoin(fname)
124 124
125 125 def isfilecached(repo, name):
126 126 """check if a repo has already cached "name" filecache-ed property
127 127
128 128 This returns (cachedobj-or-None, iscached) tuple.
129 129 """
130 130 cacheentry = repo.unfiltered()._filecache.get(name, None)
131 131 if not cacheentry:
132 132 return None, False
133 133 return cacheentry.obj, True
134 134
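For example (a sketch; 'changelog' is one of the filecache-ed properties defined later in this file):

    # peek at the changelog cache without forcing it to load
    obj, cached = isfilecached(repo, 'changelog')
    if cached:
        repo.ui.debug(b'changelog already loaded\n')
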
135 135 class unfilteredpropertycache(util.propertycache):
136 136 """propertycache that apply to unfiltered repo only"""
137 137
138 138 def __get__(self, repo, type=None):
139 139 unfi = repo.unfiltered()
140 140 if unfi is repo:
141 141 return super(unfilteredpropertycache, self).__get__(unfi)
142 142 return getattr(unfi, self.name)
143 143
144 144 class filteredpropertycache(util.propertycache):
145 145 """propertycache that must take filtering in account"""
146 146
147 147 def cachevalue(self, obj, value):
148 148 object.__setattr__(obj, self.name, value)
149 149
150 150
151 151 def hasunfilteredcache(repo, name):
152 152 """check if a repo has an unfilteredpropertycache value for <name>"""
153 153 return name in vars(repo.unfiltered())
154 154
155 155 def unfilteredmethod(orig):
156 156 """decorate method that always need to be run on unfiltered version"""
157 157 def wrapper(repo, *args, **kwargs):
158 158 return orig(repo.unfiltered(), *args, **kwargs)
159 159 return wrapper
160 160
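A small sketch of the decorator above; the function name is hypothetical:

    @unfilteredmethod
    def countallrevs(repo):
        # runs on repo.unfiltered(), so hidden changesets are counted too
        return len(repo)
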
161 161 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
162 162 'unbundle'}
163 163 legacycaps = moderncaps.union({'changegroupsubset'})
164 164
165 165 @interfaceutil.implementer(repository.ipeercommandexecutor)
166 166 class localcommandexecutor(object):
167 167 def __init__(self, peer):
168 168 self._peer = peer
169 169 self._sent = False
170 170 self._closed = False
171 171
172 172 def __enter__(self):
173 173 return self
174 174
175 175 def __exit__(self, exctype, excvalue, exctb):
176 176 self.close()
177 177
178 178 def callcommand(self, command, args):
179 179 if self._sent:
180 180 raise error.ProgrammingError('callcommand() cannot be used after '
181 181 'sendcommands()')
182 182
183 183 if self._closed:
184 184 raise error.ProgrammingError('callcommand() cannot be used after '
185 185 'close()')
186 186
187 187 # We don't need to support anything fancy. Just call the named
188 188 # method on the peer and return a resolved future.
189 189 fn = getattr(self._peer, pycompat.sysstr(command))
190 190
191 191 f = pycompat.futures.Future()
192 192
193 193 try:
194 194 result = fn(**pycompat.strkwargs(args))
195 195 except Exception:
196 196 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
197 197 else:
198 198 f.set_result(result)
199 199
200 200 return f
201 201
202 202 def sendcommands(self):
203 203 self._sent = True
204 204
205 205 def close(self):
206 206 self._closed = True
207 207
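The executor implements the generic peer command protocol; a hedged sketch of driving it (this mirrors how exchange code calls peers, though the command and argument here are illustrative):

    with repo.peer().commandexecutor() as e:
        # local futures resolve immediately; remote peers may batch
        node = e.callcommand(b'lookup', {b'key': b'tip'}).result()
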
208 208 @interfaceutil.implementer(repository.ipeercommands)
209 209 class localpeer(repository.peer):
210 210 '''peer for a local repo; reflects only the most recent API'''
211 211
212 212 def __init__(self, repo, caps=None):
213 213 super(localpeer, self).__init__()
214 214
215 215 if caps is None:
216 216 caps = moderncaps.copy()
217 217 self._repo = repo.filtered('served')
218 218 self.ui = repo.ui
219 219 self._caps = repo._restrictcapabilities(caps)
220 220
221 221 # Begin of _basepeer interface.
222 222
223 223 def url(self):
224 224 return self._repo.url()
225 225
226 226 def local(self):
227 227 return self._repo
228 228
229 229 def peer(self):
230 230 return self
231 231
232 232 def canpush(self):
233 233 return True
234 234
235 235 def close(self):
236 236 self._repo.close()
237 237
238 238 # End of _basepeer interface.
239 239
240 240 # Begin of _basewirecommands interface.
241 241
242 242 def branchmap(self):
243 243 return self._repo.branchmap()
244 244
245 245 def capabilities(self):
246 246 return self._caps
247 247
248 248 def clonebundles(self):
249 249 return self._repo.tryread('clonebundles.manifest')
250 250
251 251 def debugwireargs(self, one, two, three=None, four=None, five=None):
252 252 """Used to test argument passing over the wire"""
253 253 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
254 254 pycompat.bytestr(four),
255 255 pycompat.bytestr(five))
256 256
257 257 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
258 258 **kwargs):
259 259 chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
260 260 common=common, bundlecaps=bundlecaps,
261 261 **kwargs)[1]
262 262 cb = util.chunkbuffer(chunks)
263 263
264 264 if exchange.bundle2requested(bundlecaps):
265 265 # When requesting a bundle2, getbundle returns a stream to make the
266 266 # wire level function happier. We need to build a proper object
267 267 # from it in local peer.
268 268 return bundle2.getunbundler(self.ui, cb)
269 269 else:
270 270 return changegroup.getunbundler('01', cb, None)
271 271
272 272 def heads(self):
273 273 return self._repo.heads()
274 274
275 275 def known(self, nodes):
276 276 return self._repo.known(nodes)
277 277
278 278 def listkeys(self, namespace):
279 279 return self._repo.listkeys(namespace)
280 280
281 281 def lookup(self, key):
282 282 return self._repo.lookup(key)
283 283
284 284 def pushkey(self, namespace, key, old, new):
285 285 return self._repo.pushkey(namespace, key, old, new)
286 286
287 287 def stream_out(self):
288 288 raise error.Abort(_('cannot perform stream clone against local '
289 289 'peer'))
290 290
291 291 def unbundle(self, bundle, heads, url):
292 292 """apply a bundle on a repo
293 293
294 294 This function handles the repo locking itself."""
295 295 try:
296 296 try:
297 297 bundle = exchange.readbundle(self.ui, bundle, None)
298 298 ret = exchange.unbundle(self._repo, bundle, heads, 'push', url)
299 299 if util.safehasattr(ret, 'getchunks'):
300 300 # This is a bundle20 object, turn it into an unbundler.
301 301 # This little dance should be dropped eventually when the
302 302 # API is finally improved.
303 303 stream = util.chunkbuffer(ret.getchunks())
304 304 ret = bundle2.getunbundler(self.ui, stream)
305 305 return ret
306 306 except Exception as exc:
307 307 # If the exception contains output salvaged from a bundle2
308 308 # reply, we need to make sure it is printed before continuing
309 309 # to fail. So we build a bundle2 with such output and consume
310 310 # it directly.
311 311 #
312 312 # This is not very elegant but allows a "simple" solution for
313 313 # issue4594
314 314 output = getattr(exc, '_bundle2salvagedoutput', ())
315 315 if output:
316 316 bundler = bundle2.bundle20(self._repo.ui)
317 317 for out in output:
318 318 bundler.addpart(out)
319 319 stream = util.chunkbuffer(bundler.getchunks())
320 320 b = bundle2.getunbundler(self.ui, stream)
321 321 bundle2.processbundle(self._repo, b)
322 322 raise
323 323 except error.PushRaced as exc:
324 324 raise error.ResponseError(_('push failed:'),
325 325 stringutil.forcebytestr(exc))
326 326
327 327 # End of _basewirecommands interface.
328 328
329 329 # Begin of peer interface.
330 330
331 331 def commandexecutor(self):
332 332 return localcommandexecutor(self)
333 333
334 334 # End of peer interface.
335 335
336 336 @interfaceutil.implementer(repository.ipeerlegacycommands)
337 337 class locallegacypeer(localpeer):
338 338 '''peer extension which implements legacy methods too; used for tests with
339 339 restricted capabilities'''
340 340
341 341 def __init__(self, repo):
342 342 super(locallegacypeer, self).__init__(repo, caps=legacycaps)
343 343
344 344 # Begin of baselegacywirecommands interface.
345 345
346 346 def between(self, pairs):
347 347 return self._repo.between(pairs)
348 348
349 349 def branches(self, nodes):
350 350 return self._repo.branches(nodes)
351 351
352 352 def changegroup(self, nodes, source):
353 353 outgoing = discovery.outgoing(self._repo, missingroots=nodes,
354 354 missingheads=self._repo.heads())
355 355 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
356 356
357 357 def changegroupsubset(self, bases, heads, source):
358 358 outgoing = discovery.outgoing(self._repo, missingroots=bases,
359 359 missingheads=heads)
360 360 return changegroup.makechangegroup(self._repo, outgoing, '01', source)
361 361
362 362 # End of baselegacywirecommands interface.
363 363
364 364 # Increment the sub-version when the revlog v2 format changes to lock out old
365 365 # clients.
366 366 REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'
367 367
368 368 # A repository with the sparserevlog feature will have delta chains that
369 369 # can spread over a larger span. Sparse reading cuts these large spans into
370 370 # pieces, so that each piece isn't too big.
371 371 # Without the sparserevlog capability, reading from the repository could use
372 372 # huge amounts of memory, because the whole span would be read at once,
373 373 # including all the intermediate revisions that aren't pertinent for the chain.
374 374 # This is why once a repository has enabled sparse-read, it becomes required.
375 375 SPARSEREVLOG_REQUIREMENT = 'sparserevlog'
376 376
377 377 # Functions receiving (ui, features) that extensions can register to impact
378 378 # the ability to load repositories with custom requirements. Only
379 379 # functions defined in loaded extensions are called.
380 380 #
381 381 # The function receives a set of requirement strings that the repository
382 382 # is capable of opening. Functions will typically add elements to the
383 383 # set to reflect that the extension knows how to handle those requirements.
384 384 featuresetupfuncs = set()
385 385
386 386 def makelocalrepository(baseui, path, intents=None):
387 387 """Create a local repository object.
388 388
389 389 Given arguments needed to construct a local repository, this function
390 390 performs various early repository loading functionality (such as
391 391 reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
392 392 the repository can be opened, derives a type suitable for representing
393 393 that repository, and returns an instance of it.
394 394
395 395 The returned object conforms to the ``repository.completelocalrepository``
396 396 interface.
397 397
398 398 The repository type is derived by calling a series of factory functions
399 399 for each aspect/interface of the final repository. These are defined by
400 400 ``REPO_INTERFACES``.
401 401
402 402 Each factory function is called to produce a type implementing a specific
403 403 interface. The cumulative list of returned types will be combined into a
404 404 new type and that type will be instantiated to represent the local
405 405 repository.
406 406
407 407 The factory functions each receive various state that may be consulted
408 408 as part of deriving a type.
409 409
410 410 Extensions should wrap these factory functions to customize repository type
411 411 creation. Note that an extension's wrapped function may be called even if
412 412 that extension is not loaded for the repo being constructed. Extensions
413 413 should check if their ``__name__`` appears in the
414 414 ``extensionmodulenames`` set passed to the factory function and no-op if
415 415 not.
416 416 """
417 417 ui = baseui.copy()
418 418 # Prevent copying repo configuration.
419 419 ui.copy = baseui.copy
420 420
421 421 # Working directory VFS rooted at repository root.
422 422 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
423 423
424 424 # Main VFS for .hg/ directory.
425 425 hgpath = wdirvfs.join(b'.hg')
426 426 hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)
427 427
428 428 # The .hg/ path should exist and should be a directory. All other
429 429 # cases are errors.
430 430 if not hgvfs.isdir():
431 431 try:
432 432 hgvfs.stat()
433 433 except OSError as e:
434 434 if e.errno != errno.ENOENT:
435 435 raise
436 436
437 437 raise error.RepoError(_(b'repository %s not found') % path)
438 438
439 439 # .hg/requires file contains a newline-delimited list of
440 440 # features/capabilities the opener (us) must have in order to use
441 441 # the repository. This file was introduced in Mercurial 0.9.2,
442 442 # which means very old repositories may not have one. We assume
443 443 # a missing file translates to no requirements.
444 444 try:
445 445 requirements = set(hgvfs.read(b'requires').splitlines())
446 446 except IOError as e:
447 447 if e.errno != errno.ENOENT:
448 448 raise
449 449 requirements = set()
450 450
451 451 # The .hg/hgrc file may load extensions or contain config options
452 452 # that influence repository construction. Attempt to load it and
453 453 # process any new extensions that it may have pulled in.
454 454 if loadhgrc(ui, wdirvfs, hgvfs, requirements):
455 455 afterhgrcload(ui, wdirvfs, hgvfs, requirements)
456 456 extensions.loadall(ui)
457 457 extensions.populateui(ui)
458 458
459 459 # Set of module names of extensions loaded for this repository.
460 460 extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}
461 461
462 462 supportedrequirements = gathersupportedrequirements(ui)
463 463
464 464 # We first validate the requirements are known.
465 465 ensurerequirementsrecognized(requirements, supportedrequirements)
466 466
467 467 # Then we validate that the known set is reasonable to use together.
468 468 ensurerequirementscompatible(ui, requirements)
469 469
470 470 # TODO there are unhandled edge cases related to opening repositories with
471 471 # shared storage. If storage is shared, we should also test for requirements
472 472 # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
473 473 # that repo, as that repo may load extensions needed to open it. This is a
474 474 # bit complicated because we don't want the other hgrc to overwrite settings
475 475 # in this hgrc.
476 476 #
477 477 # This bug is somewhat mitigated by the fact that we copy the .hg/requires
478 478 # file when sharing repos. But if a requirement is added after the share is
479 479 # performed, thereby introducing a new requirement for the opener, we may
480 480 # not see that and could encounter a run-time error interacting with
481 481 # that shared store since it has an unknown-to-us requirement.
482 482
483 483 # At this point, we know we should be capable of opening the repository.
484 484 # Now get on with doing that.
485 485
486 486 features = set()
487 487
488 488 # The "store" part of the repository holds versioned data. How it is
489 489 # accessed is determined by various requirements. The ``shared`` or
490 490 # ``relshared`` requirements indicate the store lives in the path contained
491 491 # in the ``.hg/sharedpath`` file. This is an absolute path for
492 492 # ``shared`` and relative to ``.hg/`` for ``relshared``.
493 493 if b'shared' in requirements or b'relshared' in requirements:
494 494 sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
495 495 if b'relshared' in requirements:
496 496 sharedpath = hgvfs.join(sharedpath)
497 497
498 498 sharedvfs = vfsmod.vfs(sharedpath, realpath=True)
499 499
500 500 if not sharedvfs.exists():
501 501 raise error.RepoError(_(b'.hg/sharedpath points to nonexistent '
502 502 b'directory %s') % sharedvfs.base)
503 503
504 504 features.add(repository.REPO_FEATURE_SHARED_STORAGE)
505 505
506 506 storebasepath = sharedvfs.base
507 507 cachepath = sharedvfs.join(b'cache')
508 508 else:
509 509 storebasepath = hgvfs.base
510 510 cachepath = hgvfs.join(b'cache')
511 511 wcachepath = hgvfs.join(b'wcache')
512 512
513 513
514 514 # The store has changed over time and the exact layout is dictated by
515 515 # requirements. The store interface abstracts differences across all
516 516 # of them.
517 517 store = makestore(requirements, storebasepath,
518 518 lambda base: vfsmod.vfs(base, cacheaudited=True))
519 519 hgvfs.createmode = store.createmode
520 520
521 521 storevfs = store.vfs
522 522 storevfs.options = resolvestorevfsoptions(ui, requirements, features)
523 523
524 524 # The cache vfs is used to manage cache files.
525 525 cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
526 526 cachevfs.createmode = store.createmode
527 527 # The cache vfs is used to manage cache files related to the working copy
528 528 wcachevfs = vfsmod.vfs(wcachepath, cacheaudited=True)
529 529 wcachevfs.createmode = store.createmode
530 530
531 531 # Now resolve the type for the repository object. We do this by repeatedly
532 532 # calling a factory function to produce types for specific aspects of the
533 533 # repo's operation. The aggregate returned types are used as base classes
534 534 # for a dynamically-derived type, which will represent our new repository.
535 535
536 536 bases = []
537 537 extrastate = {}
538 538
539 539 for iface, fn in REPO_INTERFACES:
540 540 # We pass all potentially useful state to give extensions tons of
541 541 # flexibility.
542 542 typ = fn()(ui=ui,
543 543 intents=intents,
544 544 requirements=requirements,
545 545 features=features,
546 546 wdirvfs=wdirvfs,
547 547 hgvfs=hgvfs,
548 548 store=store,
549 549 storevfs=storevfs,
550 550 storeoptions=storevfs.options,
551 551 cachevfs=cachevfs,
552 552 wcachevfs=wcachevfs,
553 553 extensionmodulenames=extensionmodulenames,
554 554 extrastate=extrastate,
555 555 baseclasses=bases)
556 556
557 557 if not isinstance(typ, type):
558 558 raise error.ProgrammingError('unable to construct type for %s' %
559 559 iface)
560 560
561 561 bases.append(typ)
562 562
563 563 # type() allows you to use characters in type names that wouldn't be
564 564 # recognized as Python symbols in source code. We abuse that to add
565 565 # rich information about our constructed repo.
566 566 name = pycompat.sysstr(b'derivedrepo:%s<%s>' % (
567 567 wdirvfs.base,
568 568 b','.join(sorted(requirements))))
569 569
570 570 cls = type(name, tuple(bases), {})
571 571
572 572 return cls(
573 573 baseui=baseui,
574 574 ui=ui,
575 575 origroot=path,
576 576 wdirvfs=wdirvfs,
577 577 hgvfs=hgvfs,
578 578 requirements=requirements,
579 579 supportedrequirements=supportedrequirements,
580 580 sharedpath=storebasepath,
581 581 store=store,
582 582 cachevfs=cachevfs,
583 583 wcachevfs=wcachevfs,
584 584 features=features,
585 585 intents=intents)
586 586
587 587 def loadhgrc(ui, wdirvfs, hgvfs, requirements):
588 588 """Load hgrc files/content into a ui instance.
589 589
590 590 This is called during repository opening to load any additional
591 591 config files or settings relevant to the current repository.
592 592
593 593 Returns a bool indicating whether any additional configs were loaded.
594 594
595 595 Extensions should monkeypatch this function to modify how per-repo
596 596 configs are loaded. For example, an extension may wish to pull in
597 597 configs from alternate files or sources.
598 598 """
599 599 try:
600 600 ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
601 601 return True
602 602 except IOError:
603 603 return False
604 604
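Since loadhgrc is an explicit extension point, here is a sketch of monkeypatching it via extensions.wrapfunction; the extra config file name 'hgrc-extra' is invented for illustration:

    from mercurial import extensions, localrepo

    def _loadhgrc(orig, ui, wdirvfs, hgvfs, requirements):
        loaded = orig(ui, wdirvfs, hgvfs, requirements)
        try:
            # hypothetical: honour an additional per-repo config file
            ui.readconfig(hgvfs.join(b'hgrc-extra'), root=wdirvfs.base)
            return True
        except IOError:
            return loaded

    def uisetup(ui):
        extensions.wrapfunction(localrepo, 'loadhgrc', _loadhgrc)
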
605 605 def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
606 606 """Perform additional actions after .hg/hgrc is loaded.
607 607
608 608 This function is called during repository loading immediately after
609 609 the .hg/hgrc file is loaded and before per-repo extensions are loaded.
610 610
611 611 The function can be used to validate configs, automatically add
612 612 options (including extensions) based on requirements, etc.
613 613 """
614 614
615 615 # Map of requirements to list of extensions to load automatically when
616 616 # requirement is present.
617 617 autoextensions = {
618 618 b'largefiles': [b'largefiles'],
619 619 b'lfs': [b'lfs'],
620 620 }
621 621
622 622 for requirement, names in sorted(autoextensions.items()):
623 623 if requirement not in requirements:
624 624 continue
625 625
626 626 for name in names:
627 627 if not ui.hasconfig(b'extensions', name):
628 628 ui.setconfig(b'extensions', name, b'', source='autoload')
629 629
630 630 def gathersupportedrequirements(ui):
631 631 """Determine the complete set of recognized requirements."""
632 632 # Start with all requirements supported by this file.
633 633 supported = set(localrepository._basesupported)
634 634
635 635 # Execute ``featuresetupfuncs`` entries if they belong to an extension
636 636 # relevant to this ui instance.
637 637 modules = {m.__name__ for n, m in extensions.extensions(ui)}
638 638
639 639 for fn in featuresetupfuncs:
640 640 if fn.__module__ in modules:
641 641 fn(ui, supported)
642 642
643 643 # Add derived requirements from registered compression engines.
644 644 for name in util.compengines:
645 645 engine = util.compengines[name]
646 646 if engine.revlogheader():
647 647 supported.add(b'exp-compression-%s' % name)
648 648
649 649 return supported
650 650
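A sketch of how an extension might register with featuresetupfuncs so a custom requirement is recognized when opening repositories; 'exp-myfeature' is an invented requirement string:

    from mercurial import localrepo

    def featuresetup(ui, supported):
        # declare that this extension can open repos with our requirement
        supported.add(b'exp-myfeature')

    def uisetup(ui):
        localrepo.featuresetupfuncs.add(featuresetup)
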
651 651 def ensurerequirementsrecognized(requirements, supported):
652 652 """Validate that a set of local requirements is recognized.
653 653
654 654 Receives a set of requirements. Raises an ``error.RepoError`` if there
655 655 exists any requirement in that set that currently loaded code doesn't
656 656 recognize.
657 657
658 658 Returns a set of supported requirements.
659 659 """
660 660 missing = set()
661 661
662 662 for requirement in requirements:
663 663 if requirement in supported:
664 664 continue
665 665
666 666 if not requirement or not requirement[0:1].isalnum():
667 667 raise error.RequirementError(_(b'.hg/requires file is corrupt'))
668 668
669 669 missing.add(requirement)
670 670
671 671 if missing:
672 672 raise error.RequirementError(
673 673 _(b'repository requires features unknown to this Mercurial: %s') %
674 674 b' '.join(sorted(missing)),
675 675 hint=_(b'see https://mercurial-scm.org/wiki/MissingRequirement '
676 676 b'for more information'))
677 677
678 678 def ensurerequirementscompatible(ui, requirements):
679 679 """Validates that a set of recognized requirements is mutually compatible.
680 680
681 681 Some requirements may not be compatible with others or require
682 682 config options that aren't enabled. This function is called during
683 683 repository opening to ensure that the set of requirements needed
684 684 to open a repository is sane and compatible with config options.
685 685
686 686 Extensions can monkeypatch this function to perform additional
687 687 checking.
688 688
689 689 ``error.RepoError`` should be raised on failure.
690 690 """
691 691 if b'exp-sparse' in requirements and not sparse.enabled:
692 692 raise error.RepoError(_(b'repository is using sparse feature but '
693 693 b'sparse is not enabled; enable the '
694 694 b'"sparse" extensions to access'))
695 695
696 696 def makestore(requirements, path, vfstype):
697 697 """Construct a storage object for a repository."""
698 698 if b'store' in requirements:
699 699 if b'fncache' in requirements:
700 700 return storemod.fncachestore(path, vfstype,
701 701 b'dotencode' in requirements)
702 702
703 703 return storemod.encodedstore(path, vfstype)
704 704
705 705 return storemod.basicstore(path, vfstype)
706 706
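For a repository with the modern layout (the 'store', 'fncache', and 'dotencode' requirements all present), this resolves to a fncachestore. A hedged sketch mirroring the call in makelocalrepository():

    # hgvfs.base stands in for storebasepath on a non-shared repo
    store = makestore({b'store', b'fncache', b'dotencode'},
                      hgvfs.base,
                      lambda base: vfsmod.vfs(base, cacheaudited=True))
    # -> storemod.fncachestore, since all three requirements are present
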
707 707 def resolvestorevfsoptions(ui, requirements, features):
708 708 """Resolve the options to pass to the store vfs opener.
709 709
710 710 The returned dict is used to influence behavior of the storage layer.
711 711 """
712 712 options = {}
713 713
714 714 if b'treemanifest' in requirements:
715 715 options[b'treemanifest'] = True
716 716
717 717 # experimental config: format.manifestcachesize
718 718 manifestcachesize = ui.configint(b'format', b'manifestcachesize')
719 719 if manifestcachesize is not None:
720 720 options[b'manifestcachesize'] = manifestcachesize
721 721
722 722 # In the absence of another requirement superseding a revlog-related
723 723 # requirement, we have to assume the repo is using revlog version 0.
724 724 # This revlog format is super old and we don't bother trying to parse
725 725 # opener options for it because those options wouldn't do anything
726 726 # meaningful on such old repos.
727 727 if b'revlogv1' in requirements or REVLOGV2_REQUIREMENT in requirements:
728 728 options.update(resolverevlogstorevfsoptions(ui, requirements, features))
729 729
730 730 return options
731 731
732 732 def resolverevlogstorevfsoptions(ui, requirements, features):
733 733 """Resolve opener options specific to revlogs."""
734 734
735 735 options = {}
736 736 options[b'flagprocessors'] = {}
737 737
738 738 if b'revlogv1' in requirements:
739 739 options[b'revlogv1'] = True
740 740 if REVLOGV2_REQUIREMENT in requirements:
741 741 options[b'revlogv2'] = True
742 742
743 743 if b'generaldelta' in requirements:
744 744 options[b'generaldelta'] = True
745 745
746 746 # experimental config: format.chunkcachesize
747 747 chunkcachesize = ui.configint(b'format', b'chunkcachesize')
748 748 if chunkcachesize is not None:
749 749 options[b'chunkcachesize'] = chunkcachesize
750 750
751 751 deltabothparents = ui.configbool(b'storage',
752 752 b'revlog.optimize-delta-parent-choice')
753 753 options[b'deltabothparents'] = deltabothparents
754 754
755 755 options[b'lazydeltabase'] = not scmutil.gddeltaconfig(ui)
756 756
757 757 chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
758 758 if 0 <= chainspan:
759 759 options[b'maxdeltachainspan'] = chainspan
760 760
761 761 mmapindexthreshold = ui.configbytes(b'storage', b'mmap-threshold')
762 762 if mmapindexthreshold is not None:
763 763 options[b'mmapindexthreshold'] = mmapindexthreshold
764 764
765 765 withsparseread = ui.configbool(b'experimental', b'sparse-read')
766 766 srdensitythres = float(ui.config(b'experimental',
767 767 b'sparse-read.density-threshold'))
768 768 srmingapsize = ui.configbytes(b'experimental',
769 769 b'sparse-read.min-gap-size')
770 770 options[b'with-sparse-read'] = withsparseread
771 771 options[b'sparse-read-density-threshold'] = srdensitythres
772 772 options[b'sparse-read-min-gap-size'] = srmingapsize
773 773
774 774 sparserevlog = SPARSEREVLOG_REQUIREMENT in requirements
775 775 options[b'sparse-revlog'] = sparserevlog
776 776 if sparserevlog:
777 777 options[b'generaldelta'] = True
778 778
779 779 maxchainlen = None
780 780 if sparserevlog:
781 781 maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
782 782 # experimental config: format.maxchainlen
783 783 maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
784 784 if maxchainlen is not None:
785 785 options[b'maxchainlen'] = maxchainlen
786 786
787 787 for r in requirements:
788 788 if r.startswith(b'exp-compression-'):
789 789 options[b'compengine'] = r[len(b'exp-compression-'):]
790 790
791 791 if repository.NARROW_REQUIREMENT in requirements:
792 792 options[b'enableellipsis'] = True
793 793
794 794 return options
795 795
796 796 def makemain(**kwargs):
797 797 """Produce a type conforming to ``ilocalrepositorymain``."""
798 798 return localrepository
799 799
800 800 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
801 801 class revlogfilestorage(object):
802 802 """File storage when using revlogs."""
803 803
804 804 def file(self, path):
805 805 if path[0] == b'/':
806 806 path = path[1:]
807 807
808 808 return filelog.filelog(self.svfs, path)
809 809
810 810 @interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
811 811 class revlognarrowfilestorage(object):
812 812 """File storage when using revlogs and narrow files."""
813 813
814 814 def file(self, path):
815 815 if path[0] == b'/':
816 816 path = path[1:]
817 817
818 818 return filelog.narrowfilelog(self.svfs, path, self.narrowmatch())
819 819
820 820 def makefilestorage(requirements, features, **kwargs):
821 821 """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
822 822 features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)
823 823 features.add(repository.REPO_FEATURE_STREAM_CLONE)
824 824
825 825 if repository.NARROW_REQUIREMENT in requirements:
826 826 return revlognarrowfilestorage
827 827 else:
828 828 return revlogfilestorage
829 829
830 830 # List of repository interfaces and factory functions for them. Each
831 831 # will be called in order during ``makelocalrepository()`` to iteratively
832 832 # derive the final type for a local repository instance. We capture the
833 833 # function as a lambda so we don't hold a reference and the module-level
834 834 # functions can be wrapped.
835 835 REPO_INTERFACES = [
836 836 (repository.ilocalrepositorymain, lambda: makemain),
837 837 (repository.ilocalrepositoryfilestorage, lambda: makefilestorage),
838 838 ]
839 839
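Extensions customize repository types by wrapping these factories, as the makelocalrepository docstring describes. A hedged sketch that mixes a class into the file storage type; every name except the wrapped function is hypothetical:

    from mercurial import extensions, localrepo

    class _mystoragemixin(object):
        pass  # hypothetical extra file-storage behavior

    def _makefilestorage(orig, requirements, features, **kwargs):
        cls = orig(requirements, features, **kwargs)
        # only act on repos that actually loaded this extension
        if __name__ not in kwargs.get('extensionmodulenames', set()):
            return cls
        return type('mystorage', (_mystoragemixin, cls), {})

    def uisetup(ui):
        extensions.wrapfunction(localrepo, 'makefilestorage', _makefilestorage)
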
840 840 @interfaceutil.implementer(repository.ilocalrepositorymain)
841 841 class localrepository(object):
842 842 """Main class for representing local repositories.
843 843
844 844 All local repositories are instances of this class.
845 845
846 846 Constructed on its own, instances of this class are not usable as
847 847 repository objects. To obtain a usable repository object, call
848 848 ``hg.repository()``, ``localrepo.instance()``, or
849 849 ``localrepo.makelocalrepository()``. The latter is the lowest-level.
850 850 ``instance()`` adds support for creating new repositories.
851 851 ``hg.repository()`` adds more extension integration, including calling
852 852 ``reposetup()``. Generally speaking, ``hg.repository()`` should be
853 853 used.
854 854 """
855 855
856 856 # obsolete experimental requirements:
857 857 # - manifestv2: An experimental new manifest format that allowed
858 858 # for stem compression of long paths. Experiment ended up not
859 859 # being successful (repository sizes went up due to worse delta
860 860 # chains), and the code was deleted in 4.6.
861 861 supportedformats = {
862 862 'revlogv1',
863 863 'generaldelta',
864 864 'treemanifest',
865 865 REVLOGV2_REQUIREMENT,
866 866 SPARSEREVLOG_REQUIREMENT,
867 867 }
868 868 _basesupported = supportedformats | {
869 869 'store',
870 870 'fncache',
871 871 'shared',
872 872 'relshared',
873 873 'dotencode',
874 874 'exp-sparse',
875 875 'internal-phase'
876 876 }
877 877
878 878 # list of prefix for file which can be written without 'wlock'
879 879 # Extensions should extend this list when needed
880 880 _wlockfreeprefix = {
881 881 # We might consider requiring 'wlock' for the next
882 882 # two, but pretty much all the existing code assume
883 883 # wlock is not needed so we keep them excluded for
884 884 # now.
885 885 'hgrc',
886 886 'requires',
887 887 # XXX cache is a complicated business; someone
888 888 # should investigate this in depth at some point
889 889 'cache/',
890 890 # XXX shouldn't be dirstate covered by the wlock?
891 891 'dirstate',
892 892 # XXX bisect was still a bit too messy at the time
893 893 # this changeset was introduced. Someone should fix
894 894 # the remaining bit and drop this line
895 895 'bisect.state',
896 896 }
897 897
898 898 def __init__(self, baseui, ui, origroot, wdirvfs, hgvfs, requirements,
899 899 supportedrequirements, sharedpath, store, cachevfs, wcachevfs,
900 900 features, intents=None):
901 901 """Create a new local repository instance.
902 902
903 903 Most callers should use ``hg.repository()``, ``localrepo.instance()``,
904 904 or ``localrepo.makelocalrepository()`` for obtaining a new repository
905 905 object.
906 906
907 907 Arguments:
908 908
909 909 baseui
910 910 ``ui.ui`` instance that ``ui`` argument was based off of.
911 911
912 912 ui
913 913 ``ui.ui`` instance for use by the repository.
914 914
915 915 origroot
916 916 ``bytes`` path to working directory root of this repository.
917 917
918 918 wdirvfs
919 919 ``vfs.vfs`` rooted at the working directory.
920 920
921 921 hgvfs
922 922 ``vfs.vfs`` rooted at .hg/
923 923
924 924 requirements
925 925 ``set`` of bytestrings representing repository opening requirements.
926 926
927 927 supportedrequirements
928 928 ``set`` of bytestrings representing repository requirements that we
928 928 know how to open. May be a superset of ``requirements``.
930 930
931 931 sharedpath
932 932 ``bytes`` Defining path to storage base directory. Points to a
933 933 ``.hg/`` directory somewhere.
934 934
935 935 store
936 936 ``store.basicstore`` (or derived) instance providing access to
937 937 versioned storage.
938 938
939 939 cachevfs
940 940 ``vfs.vfs`` used for cache files.
941 941
942 942 wcachevfs
943 943 ``vfs.vfs`` used for cache files related to the working copy.
944 944
945 945 features
946 946 ``set`` of bytestrings defining features/capabilities of this
947 947 instance.
948 948
949 949 intents
950 950 ``set`` of system strings indicating what this repo will be used
951 951 for.
952 952 """
953 953 self.baseui = baseui
954 954 self.ui = ui
955 955 self.origroot = origroot
956 956 # vfs rooted at working directory.
957 957 self.wvfs = wdirvfs
958 958 self.root = wdirvfs.base
959 959 # vfs rooted at .hg/. Used to access most non-store paths.
960 960 self.vfs = hgvfs
961 961 self.path = hgvfs.base
962 962 self.requirements = requirements
963 963 self.supported = supportedrequirements
964 964 self.sharedpath = sharedpath
965 965 self.store = store
966 966 self.cachevfs = cachevfs
967 967 self.wcachevfs = wcachevfs
968 968 self.features = features
969 969
970 970 self.filtername = None
971 971
972 972 if (self.ui.configbool('devel', 'all-warnings') or
973 973 self.ui.configbool('devel', 'check-locks')):
974 974 self.vfs.audit = self._getvfsward(self.vfs.audit)
975 975 # A list of callbacks to shape the phase if no data were found.
976 976 # Callbacks are in the form: func(repo, roots) --> processed root.
977 977 # This list is to be filled by extensions during repo setup
978 978 self._phasedefaults = []
979 979
980 980 color.setup(self.ui)
981 981
982 982 self.spath = self.store.path
983 983 self.svfs = self.store.vfs
984 984 self.sjoin = self.store.join
985 985 if (self.ui.configbool('devel', 'all-warnings') or
986 986 self.ui.configbool('devel', 'check-locks')):
987 987 if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
988 988 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
989 989 else: # standard vfs
990 990 self.svfs.audit = self._getsvfsward(self.svfs.audit)
991 991
992 992 self._dirstatevalidatewarned = False
993 993
994 994 self._branchcaches = {}
995 995 self._revbranchcache = None
996 996 self._filterpats = {}
997 997 self._datafilters = {}
998 998 self._transref = self._lockref = self._wlockref = None
999 999
1000 1000 # A cache for various files under .hg/ that tracks file changes,
1001 1001 # (used by the filecache decorator)
1002 1002 #
1003 1003 # Maps a property name to its util.filecacheentry
1004 1004 self._filecache = {}
1005 1005
1006 1006 # hold sets of revision to be filtered
1007 1007 # should be cleared when something might have changed the filter value:
1008 1008 # - new changesets,
1009 1009 # - phase change,
1010 1010 # - new obsolescence marker,
1011 1011 # - working directory parent change,
1012 1012 # - bookmark changes
1013 1013 self.filteredrevcache = {}
1014 1014
1015 1015 # post-dirstate-status hooks
1016 1016 self._postdsstatus = []
1017 1017
1018 1018 # generic mapping between names and nodes
1019 1019 self.names = namespaces.namespaces()
1020 1020
1021 1021 # Key to signature value.
1022 1022 self._sparsesignaturecache = {}
1023 1023 # Signature to cached matcher instance.
1024 1024 self._sparsematchercache = {}
1025 1025
1026 1026 def _getvfsward(self, origfunc):
1027 1027 """build a ward for self.vfs"""
1028 1028 rref = weakref.ref(self)
1029 1029 def checkvfs(path, mode=None):
1030 1030 ret = origfunc(path, mode=mode)
1031 1031 repo = rref()
1032 1032 if (repo is None
1033 1033 or not util.safehasattr(repo, '_wlockref')
1034 1034 or not util.safehasattr(repo, '_lockref')):
1035 1035 return
1036 1036 if mode in (None, 'r', 'rb'):
1037 1037 return
1038 1038 if path.startswith(repo.path):
1039 1039 # truncate name relative to the repository (.hg)
1040 1040 path = path[len(repo.path) + 1:]
1041 1041 if path.startswith('cache/'):
1042 1042 msg = 'accessing cache with vfs instead of cachevfs: "%s"'
1043 1043 repo.ui.develwarn(msg % path, stacklevel=3, config="cache-vfs")
1044 1044 if path.startswith('journal.') or path.startswith('undo.'):
1045 1045 # journal is covered by 'lock'
1046 1046 if repo._currentlock(repo._lockref) is None:
1047 1047 repo.ui.develwarn('write with no lock: "%s"' % path,
1048 1048 stacklevel=3, config='check-locks')
1049 1049 elif repo._currentlock(repo._wlockref) is None:
1050 1050 # rest of vfs files are covered by 'wlock'
1051 1051 #
1052 1052 # exclude special files
1053 1053 for prefix in self._wlockfreeprefix:
1054 1054 if path.startswith(prefix):
1055 1055 return
1056 1056 repo.ui.develwarn('write with no wlock: "%s"' % path,
1057 1057 stacklevel=3, config='check-locks')
1058 1058 return ret
1059 1059 return checkvfs
1060 1060
1061 1061 def _getsvfsward(self, origfunc):
1062 1062 """build a ward for self.svfs"""
1063 1063 rref = weakref.ref(self)
1064 1064 def checksvfs(path, mode=None):
1065 1065 ret = origfunc(path, mode=mode)
1066 1066 repo = rref()
1067 1067 if repo is None or not util.safehasattr(repo, '_lockref'):
1068 1068 return
1069 1069 if mode in (None, 'r', 'rb'):
1070 1070 return
1071 1071 if path.startswith(repo.sharedpath):
1072 1072 # truncate name relative to the repository (.hg)
1073 1073 path = path[len(repo.sharedpath) + 1:]
1074 1074 if repo._currentlock(repo._lockref) is None:
1075 1075 repo.ui.develwarn('write with no lock: "%s"' % path,
1076 1076 stacklevel=4)
1077 1077 return ret
1078 1078 return checksvfs
1079 1079
1080 1080 def close(self):
1081 1081 self._writecaches()
1082 1082
1083 1083 def _writecaches(self):
1084 1084 if self._revbranchcache:
1085 1085 self._revbranchcache.write()
1086 1086
1087 1087 def _restrictcapabilities(self, caps):
1088 1088 if self.ui.configbool('experimental', 'bundle2-advertise'):
1089 1089 caps = set(caps)
1090 1090 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
1091 1091 role='client'))
1092 1092 caps.add('bundle2=' + urlreq.quote(capsblob))
1093 1093 return caps
1094 1094
1095 1095 def _writerequirements(self):
1096 1096 scmutil.writerequires(self.vfs, self.requirements)
1097 1097
1098 1098 # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
1099 1099 # self -> auditor -> self._checknested -> self
1100 1100
1101 1101 @property
1102 1102 def auditor(self):
1103 1103 # This is only used by context.workingctx.match in order to
1104 1104 # detect files in subrepos.
1105 1105 return pathutil.pathauditor(self.root, callback=self._checknested)
1106 1106
1107 1107 @property
1108 1108 def nofsauditor(self):
1109 1109 # This is only used by context.basectx.match in order to detect
1110 1110 # files in subrepos.
1111 1111 return pathutil.pathauditor(self.root, callback=self._checknested,
1112 1112 realfs=False, cached=True)
1113 1113
1114 1114 def _checknested(self, path):
1115 1115 """Determine if path is a legal nested repository."""
1116 1116 if not path.startswith(self.root):
1117 1117 return False
1118 1118 subpath = path[len(self.root) + 1:]
1119 1119 normsubpath = util.pconvert(subpath)
1120 1120
1121 1121 # XXX: Checking against the current working copy is wrong in
1122 1122 # the sense that it can reject things like
1123 1123 #
1124 1124 # $ hg cat -r 10 sub/x.txt
1125 1125 #
1126 1126 # if sub/ is no longer a subrepository in the working copy
1127 1127 # parent revision.
1128 1128 #
1129 1129 # However, it can of course also allow things that would have
1130 1130 # been rejected before, such as the above cat command if sub/
1131 1131 # is a subrepository now, but was a normal directory before.
1132 1132 # The old path auditor would have rejected by mistake since it
1133 1133 # panics when it sees sub/.hg/.
1134 1134 #
1135 1135 # All in all, checking against the working copy seems sensible
1136 1136 # since we want to prevent access to nested repositories on
1137 1137 # the filesystem *now*.
1138 1138 ctx = self[None]
1139 1139 parts = util.splitpath(subpath)
1140 1140 while parts:
1141 1141 prefix = '/'.join(parts)
1142 1142 if prefix in ctx.substate:
1143 1143 if prefix == normsubpath:
1144 1144 return True
1145 1145 else:
1146 1146 sub = ctx.sub(prefix)
1147 1147 return sub.checknested(subpath[len(prefix) + 1:])
1148 1148 else:
1149 1149 parts.pop()
1150 1150 return False
1151 1151
1152 1152 def peer(self):
1153 1153 return localpeer(self) # not cached to avoid reference cycle
1154 1154
1155 1155 def unfiltered(self):
1156 1156 """Return unfiltered version of the repository
1157 1157
1158 1158 Intended to be overwritten by filtered repo."""
1159 1159 return self
1160 1160
1161 1161 def filtered(self, name, visibilityexceptions=None):
1162 1162 """Return a filtered version of a repository"""
1163 1163 cls = repoview.newtype(self.unfiltered().__class__)
1164 1164 return cls(self, name, visibilityexceptions)
1165 1165
1166 1166 @repofilecache('bookmarks', 'bookmarks.current')
1167 1167 def _bookmarks(self):
1168 1168 return bookmarks.bmstore(self)
1169 1169
1170 1170 @property
1171 1171 def _activebookmark(self):
1172 1172 return self._bookmarks.active
1173 1173
1174 1174 # _phasesets depend on changelog. what we need is to call
1175 1175 # _phasecache.invalidate() if '00changelog.i' was changed, but it
1176 1176 # can't be easily expressed in filecache mechanism.
1177 1177 @storecache('phaseroots', '00changelog.i')
1178 1178 def _phasecache(self):
1179 1179 return phases.phasecache(self, self._phasedefaults)
1180 1180
1181 1181 @storecache('obsstore')
1182 1182 def obsstore(self):
1183 1183 return obsolete.makestore(self.ui, self)
1184 1184
1185 1185 @storecache('00changelog.i')
1186 1186 def changelog(self):
1187 1187 return changelog.changelog(self.svfs,
1188 1188 trypending=txnutil.mayhavepending(self.root))
1189 1189
1190 1190 @storecache('00manifest.i')
1191 1191 def manifestlog(self):
1192 1192 rootstore = manifest.manifestrevlog(self.svfs)
1193 1193 return manifest.manifestlog(self.svfs, self, rootstore,
1194 1194 self.narrowmatch())
1195 1195
1196 1196 @repofilecache('dirstate')
1197 1197 def dirstate(self):
1198 1198 return self._makedirstate()
1199 1199
1200 1200 def _makedirstate(self):
1201 1201 """Extension point for wrapping the dirstate per-repo."""
1202 1202 sparsematchfn = lambda: sparse.matcher(self)
1203 1203
1204 1204 return dirstate.dirstate(self.vfs, self.ui, self.root,
1205 1205 self._dirstatevalidate, sparsematchfn)
1206 1206
1207 1207 def _dirstatevalidate(self, node):
1208 1208 try:
1209 1209 self.changelog.rev(node)
1210 1210 return node
1211 1211 except error.LookupError:
1212 1212 if not self._dirstatevalidatewarned:
1213 1213 self._dirstatevalidatewarned = True
1214 1214 self.ui.warn(_("warning: ignoring unknown"
1215 1215 " working parent %s!\n") % short(node))
1216 1216 return nullid
1217 1217
1218 1218 @storecache(narrowspec.FILENAME)
1219 1219 def narrowpats(self):
1220 1220 """matcher patterns for this repository's narrowspec
1221 1221
1222 1222 A tuple of (includes, excludes).
1223 1223 """
1224 1224 return narrowspec.load(self)
1225 1225
1226 1226 @storecache(narrowspec.FILENAME)
1227 1227 def _narrowmatch(self):
1228 1228 if repository.NARROW_REQUIREMENT not in self.requirements:
1229 1229 return matchmod.always(self.root, '')
1230 narrowspec.checkworkingcopynarrowspec(self)
1230 1231 include, exclude = self.narrowpats
1231 1232 return narrowspec.match(self.root, include=include, exclude=exclude)
1232 1233
1233 1234 def narrowmatch(self, match=None, includeexact=False):
1234 1235 """matcher corresponding the the repo's narrowspec
1235 1236
1236 1237 If `match` is given, then that will be intersected with the narrow
1237 1238 matcher.
1238 1239
1239 1240 If `includeexact` is True, then any exact matches from `match` will
1240 1241 be included even if they're outside the narrowspec.
1241 1242 """
1242 1243 if match:
1243 1244 if includeexact and not self._narrowmatch.always():
1244 1245 # do not exclude explicitly-specified paths so that they can
1245 1246 # be warned later on
1246 1247 em = matchmod.exact(match._root, match._cwd, match.files())
1247 1248 nm = matchmod.unionmatcher([self._narrowmatch, em])
1248 1249 return matchmod.intersectmatchers(match, nm)
1249 1250 return matchmod.intersectmatchers(match, self._narrowmatch)
1250 1251 return self._narrowmatch
1251 1252
1252 1253 def setnarrowpats(self, newincludes, newexcludes):
1253 1254 narrowspec.save(self, newincludes, newexcludes)
1255 narrowspec.copytoworkingcopy(self, self.currenttransaction())
1254 1256 self.invalidate(clearfilecache=True)
1257 # So the next access won't be considered a conflict
1258 # TODO: It seems like there should be a way of doing this that
1259 # doesn't involve replacing these attributes.
1260 self.narrowpats = newincludes, newexcludes
1261 self._narrowmatch = narrowspec.match(self.root, include=newincludes,
1262 exclude=newexcludes)
1255 1263
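A hedged sketch tying these narrowspec pieces together; the include pattern is hypothetical, and callers are expected to hold the wlock:

    with repo.wlock(), repo.lock():
        repo.setnarrowpats({b'path:src'}, set())
    m = repo.narrowmatch()
    m(b'src/main.py')      # True: inside the narrowspec
    m(b'docs/index.rst')   # False: filtered out
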
1256 1264 def __getitem__(self, changeid):
1257 1265 if changeid is None:
1258 1266 return context.workingctx(self)
1259 1267 if isinstance(changeid, context.basectx):
1260 1268 return changeid
1261 1269 if isinstance(changeid, slice):
1262 1270 # wdirrev isn't contiguous so the slice shouldn't include it
1263 1271 return [self[i]
1264 1272 for i in pycompat.xrange(*changeid.indices(len(self)))
1265 1273 if i not in self.changelog.filteredrevs]
1266 1274 try:
1267 1275 if isinstance(changeid, int):
1268 1276 node = self.changelog.node(changeid)
1269 1277 rev = changeid
1270 1278 elif changeid == 'null':
1271 1279 node = nullid
1272 1280 rev = nullrev
1273 1281 elif changeid == 'tip':
1274 1282 node = self.changelog.tip()
1275 1283 rev = self.changelog.rev(node)
1276 1284 elif changeid == '.':
1277 1285 # this is a hack to delay/avoid loading obsmarkers
1278 1286 # when we know that '.' won't be hidden
1279 1287 node = self.dirstate.p1()
1280 1288 rev = self.unfiltered().changelog.rev(node)
1281 1289 elif len(changeid) == 20:
1282 1290 try:
1283 1291 node = changeid
1284 1292 rev = self.changelog.rev(changeid)
1285 1293 except error.FilteredLookupError:
1286 1294 changeid = hex(changeid) # for the error message
1287 1295 raise
1288 1296 except LookupError:
1289 1297 # check if it might have come from a damaged dirstate
1290 1298 #
1291 1299 # XXX we could avoid the unfiltered if we had a recognizable
1292 1300 # exception for filtered changeset access
1293 1301 if (self.local()
1294 1302 and changeid in self.unfiltered().dirstate.parents()):
1295 1303 msg = _("working directory has unknown parent '%s'!")
1296 1304 raise error.Abort(msg % short(changeid))
1297 1305 changeid = hex(changeid) # for the error message
1298 1306 raise
1299 1307
1300 1308 elif len(changeid) == 40:
1301 1309 node = bin(changeid)
1302 1310 rev = self.changelog.rev(node)
1303 1311 else:
1304 1312 raise error.ProgrammingError(
1305 1313 "unsupported changeid '%s' of type %s" %
1306 1314 (changeid, type(changeid)))
1307 1315
1308 1316 return context.changectx(self, rev, node)
1309 1317
1310 1318 except (error.FilteredIndexError, error.FilteredLookupError):
1311 1319 raise error.FilteredRepoLookupError(_("filtered revision '%s'")
1312 1320 % pycompat.bytestr(changeid))
1313 1321 except (IndexError, LookupError):
1314 1322 raise error.RepoLookupError(
1315 1323 _("unknown revision '%s'") % pycompat.bytestr(changeid))
1316 1324 except error.WdirUnsupported:
1317 1325 return context.workingctx(self)
1318 1326
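# Hedged usage sketch of the lookup forms handled above ('repo' is an
# assumed localrepository instance, 'node' an assumed binary node id):
#
#     repo[None]     # working context
#     repo['.']      # working directory parent
#     repo['tip']    # repository tip
#     repo[0]        # by integer revision
#     repo[node]     # by 20-byte binary node (40-char hex also accepted)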
1319 1327 def __contains__(self, changeid):
1320 1328 """True if the given changeid exists
1321 1329
1322 1330 error.AmbiguousPrefixLookupError is raised if an ambiguous node is
1323 1331 specified.
1324 1332 """
1325 1333 try:
1326 1334 self[changeid]
1327 1335 return True
1328 1336 except error.RepoLookupError:
1329 1337 return False
1330 1338
1331 1339 def __nonzero__(self):
1332 1340 return True
1333 1341
1334 1342 __bool__ = __nonzero__
1335 1343
1336 1344 def __len__(self):
1337 1345 # no need to pay the cost of repoview.changelog
1338 1346 unfi = self.unfiltered()
1339 1347 return len(unfi.changelog)
1340 1348
1341 1349 def __iter__(self):
1342 1350 return iter(self.changelog)
1343 1351
1344 1352 def revs(self, expr, *args):
1345 1353 '''Find revisions matching a revset.
1346 1354
1347 1355 The revset is specified as a string ``expr`` that may contain
1348 1356 %-formatting to escape certain types. See ``revsetlang.formatspec``.
1349 1357
1350 1358 Revset aliases from the configuration are not expanded. To expand
1351 1359 user aliases, consider calling ``scmutil.revrange()`` or
1352 1360 ``repo.anyrevs([expr], user=True)``.
1353 1361
1354 1362 Returns a revset.abstractsmartset, which is a list-like interface
1355 1363 that contains integer revisions.
1356 1364 '''
1357 1365 expr = revsetlang.formatspec(expr, *args)
1358 1366 m = revset.match(None, expr)
1359 1367 return m(self)
1360 1368
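# A hedged example of the %-formatting escaping mentioned above; 'rev'
# is an assumed integer revision:
#
#     for r in repo.revs('ancestors(%d) and not public()', rev):
#         ...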
1361 1369 def set(self, expr, *args):
1362 1370 '''Find revisions matching a revset and emit changectx instances.
1363 1371
1364 1372 This is a convenience wrapper around ``revs()`` that iterates the
1365 1373 result and is a generator of changectx instances.
1366 1374
1367 1375 Revset aliases from the configuration are not expanded. To expand
1368 1376 user aliases, consider calling ``scmutil.revrange()``.
1369 1377 '''
1370 1378 for r in self.revs(expr, *args):
1371 1379 yield self[r]
1372 1380
1373 1381 def anyrevs(self, specs, user=False, localalias=None):
1374 1382 '''Find revisions matching one of the given revsets.
1375 1383
1376 1384 Revset aliases from the configuration are not expanded by default. To
1377 1385 expand user aliases, specify ``user=True``. To provide some local
1378 1386 definitions overriding user aliases, set ``localalias`` to
1379 1387 ``{name: definitionstring}``.
1380 1388 '''
1381 1389 if user:
1382 1390 m = revset.matchany(self.ui, specs,
1383 1391 lookup=revset.lookupfn(self),
1384 1392 localalias=localalias)
1385 1393 else:
1386 1394 m = revset.matchany(None, specs, localalias=localalias)
1387 1395 return m(self)
1388 1396
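# Hedged sketch of a localalias overriding user aliases; the alias name
# 'mybase' is purely illustrative:
#
#     revs = repo.anyrevs(['mybase()', 'tip'], user=True,
#                         localalias={'mybase': 'ancestor(heads())'})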
1389 1397 def url(self):
1390 1398 return 'file:' + self.root
1391 1399
1392 1400 def hook(self, name, throw=False, **args):
1393 1401 """Call a hook, passing this repo instance.
1394 1402
1395 1403 This a convenience method to aid invoking hooks. Extensions likely
1396 1404 won't call this unless they have registered a custom hook or are
1397 1405 replacing code that is expected to call a hook.
1398 1406 """
1399 1407 return hook.hook(self.ui, self, name, throw, **args)
1400 1408
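# Minimal, hedged example of firing a hook through this helper (the
# 'preupdate' hook name and its parent1 argument follow stock hook
# usage and are assumptions for illustration):
#
#     repo.hook('preupdate', throw=True, parent1=hex(p1), parent2='')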
1401 1409 @filteredpropertycache
1402 1410 def _tagscache(self):
1403 1411 '''Returns a tagscache object that contains various tags related
1404 1412 caches.'''
1405 1413
1406 1414 # This simplifies its cache management by having one decorated
1407 1415 # function (this one) and the rest simply fetch things from it.
1408 1416 class tagscache(object):
1409 1417 def __init__(self):
1410 1418 # These two define the set of tags for this repository. tags
1411 1419 # maps tag name to node; tagtypes maps tag name to 'global' or
1412 1420 # 'local'. (Global tags are defined by .hgtags across all
1413 1421 # heads, and local tags are defined in .hg/localtags.)
1414 1422 # They constitute the in-memory cache of tags.
1415 1423 self.tags = self.tagtypes = None
1416 1424
1417 1425 self.nodetagscache = self.tagslist = None
1418 1426
1419 1427 cache = tagscache()
1420 1428 cache.tags, cache.tagtypes = self._findtags()
1421 1429
1422 1430 return cache
1423 1431
1424 1432 def tags(self):
1425 1433 '''return a mapping of tag to node'''
1426 1434 t = {}
1427 1435 if self.changelog.filteredrevs:
1428 1436 tags, tt = self._findtags()
1429 1437 else:
1430 1438 tags = self._tagscache.tags
1431 1439 rev = self.changelog.rev
1432 1440 for k, v in tags.iteritems():
1433 1441 try:
1434 1442 # ignore tags to unknown nodes
1435 1443 rev(v)
1436 1444 t[k] = v
1437 1445 except (error.LookupError, ValueError):
1438 1446 pass
1439 1447 return t
1440 1448
1441 1449 def _findtags(self):
1442 1450 '''Do the hard work of finding tags. Return a pair of dicts
1443 1451 (tags, tagtypes) where tags maps tag name to node, and tagtypes
1444 1452 maps tag name to a string like \'global\' or \'local\'.
1445 1453 Subclasses or extensions are free to add their own tags, but
1446 1454 should be aware that the returned dicts will be retained for the
1447 1455 duration of the localrepo object.'''
1448 1456
1449 1457 # XXX what tagtype should subclasses/extensions use? Currently
1450 1458 # mq and bookmarks add tags, but do not set the tagtype at all.
1451 1459 # Should each extension invent its own tag type? Should there
1452 1460 # be one tagtype for all such "virtual" tags? Or is the status
1453 1461 # quo fine?
1454 1462
1455 1463
1456 1464 # map tag name to (node, hist)
1457 1465 alltags = tagsmod.findglobaltags(self.ui, self)
1458 1466 # map tag name to tag type
1459 1467 tagtypes = dict((tag, 'global') for tag in alltags)
1460 1468
1461 1469 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
1462 1470
1463 1471 # Build the return dicts. Have to re-encode tag names because
1464 1472 # the tags module always uses UTF-8 (in order not to lose info
1465 1473 # writing to the cache), but the rest of Mercurial wants them in
1466 1474 # local encoding.
1467 1475 tags = {}
1468 1476 for (name, (node, hist)) in alltags.iteritems():
1469 1477 if node != nullid:
1470 1478 tags[encoding.tolocal(name)] = node
1471 1479 tags['tip'] = self.changelog.tip()
1472 1480 tagtypes = dict([(encoding.tolocal(name), value)
1473 1481 for (name, value) in tagtypes.iteritems()])
1474 1482 return (tags, tagtypes)
1475 1483
1476 1484 def tagtype(self, tagname):
1477 1485 '''
1478 1486 return the type of the given tag. result can be:
1479 1487
1480 1488 'local' : a local tag
1481 1489 'global' : a global tag
1482 1490 None : tag does not exist
1483 1491 '''
1484 1492
1485 1493 return self._tagscache.tagtypes.get(tagname)
1486 1494
1487 1495 def tagslist(self):
1488 1496 '''return a list of tags ordered by revision'''
1489 1497 if not self._tagscache.tagslist:
1490 1498 l = []
1491 1499 for t, n in self.tags().iteritems():
1492 1500 l.append((self.changelog.rev(n), t, n))
1493 1501 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
1494 1502
1495 1503 return self._tagscache.tagslist
1496 1504
1497 1505 def nodetags(self, node):
1498 1506 '''return the tags associated with a node'''
1499 1507 if not self._tagscache.nodetagscache:
1500 1508 nodetagscache = {}
1501 1509 for t, n in self._tagscache.tags.iteritems():
1502 1510 nodetagscache.setdefault(n, []).append(t)
1503 1511 for tags in nodetagscache.itervalues():
1504 1512 tags.sort()
1505 1513 self._tagscache.nodetagscache = nodetagscache
1506 1514 return self._tagscache.nodetagscache.get(node, [])
1507 1515
1508 1516 def nodebookmarks(self, node):
1509 1517 """return the list of bookmarks pointing to the specified node"""
1510 1518 return self._bookmarks.names(node)
1511 1519
1512 1520 def branchmap(self):
1513 1521 '''returns a dictionary {branch: [branchheads]} with branchheads
1514 1522 ordered by increasing revision number'''
1515 1523 branchmap.updatecache(self)
1516 1524 return self._branchcaches[self.filtername]
1517 1525
1518 1526 @unfilteredmethod
1519 1527 def revbranchcache(self):
1520 1528 if not self._revbranchcache:
1521 1529 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
1522 1530 return self._revbranchcache
1523 1531
1524 1532 def branchtip(self, branch, ignoremissing=False):
1525 1533 '''return the tip node for a given branch
1526 1534
1527 1535 If ignoremissing is True, then this method will not raise an error.
1528 1536 This is helpful for callers that only expect None for a missing branch
1529 1537 (e.g. namespace).
1530 1538
1531 1539 '''
1532 1540 try:
1533 1541 return self.branchmap().branchtip(branch)
1534 1542 except KeyError:
1535 1543 if not ignoremissing:
1536 1544 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
1537 1545 else:
1538 1546 pass
1539 1547
1540 1548 def lookup(self, key):
1541 1549 return scmutil.revsymbol(self, key).node()
1542 1550
1543 1551 def lookupbranch(self, key):
1544 1552 if key in self.branchmap():
1545 1553 return key
1546 1554
1547 1555 return scmutil.revsymbol(self, key).branch()
1548 1556
1549 1557 def known(self, nodes):
1550 1558 cl = self.changelog
1551 1559 nm = cl.nodemap
1552 1560 filtered = cl.filteredrevs
1553 1561 result = []
1554 1562 for n in nodes:
1555 1563 r = nm.get(n)
1556 1564 resp = not (r is None or r in filtered)
1557 1565 result.append(resp)
1558 1566 return result
1559 1567
1560 1568 def local(self):
1561 1569 return self
1562 1570
1563 1571 def publishing(self):
1564 1572 # it's safe (and desirable) to trust the publish flag unconditionally
1565 1573 # so that we don't finalize changes shared between users via ssh or nfs
1566 1574 return self.ui.configbool('phases', 'publish', untrusted=True)
1567 1575
1568 1576 def cancopy(self):
1569 1577 # so statichttprepo's override of local() works
1570 1578 if not self.local():
1571 1579 return False
1572 1580 if not self.publishing():
1573 1581 return True
1574 1582 # if publishing we can't copy if there is filtered content
1575 1583 return not self.filtered('visible').changelog.filteredrevs
1576 1584
1577 1585 def shared(self):
1578 1586 '''the type of shared repository (None if not shared)'''
1579 1587 if self.sharedpath != self.path:
1580 1588 return 'store'
1581 1589 return None
1582 1590
1583 1591 def wjoin(self, f, *insidef):
1584 1592 return self.vfs.reljoin(self.root, f, *insidef)
1585 1593
1586 1594 def setparents(self, p1, p2=nullid):
1587 1595 with self.dirstate.parentchange():
1588 1596 copies = self.dirstate.setparents(p1, p2)
1589 1597 pctx = self[p1]
1590 1598 if copies:
1591 1599 # Adjust copy records; the dirstate cannot do it, as it
1592 1600 # requires access to the parents' manifests. Preserve them
1593 1601 # only for entries added to the first parent.
1594 1602 for f in copies:
1595 1603 if f not in pctx and copies[f] in pctx:
1596 1604 self.dirstate.copy(copies[f], f)
1597 1605 if p2 == nullid:
1598 1606 for f, s in sorted(self.dirstate.copies().items()):
1599 1607 if f not in pctx and s not in pctx:
1600 1608 self.dirstate.copy(None, f)
1601 1609
1602 1610 def filectx(self, path, changeid=None, fileid=None, changectx=None):
1603 1611 """changeid must be a changeset revision, if specified.
1604 1612 fileid can be a file revision or node."""
1605 1613 return context.filectx(self, path, changeid, fileid,
1606 1614 changectx=changectx)
1607 1615
1608 1616 def getcwd(self):
1609 1617 return self.dirstate.getcwd()
1610 1618
1611 1619 def pathto(self, f, cwd=None):
1612 1620 return self.dirstate.pathto(f, cwd)
1613 1621
1614 1622 def _loadfilter(self, filter):
1615 1623 if filter not in self._filterpats:
1616 1624 l = []
1617 1625 for pat, cmd in self.ui.configitems(filter):
1618 1626 if cmd == '!':
1619 1627 continue
1620 1628 mf = matchmod.match(self.root, '', [pat])
1621 1629 fn = None
1622 1630 params = cmd
1623 1631 for name, filterfn in self._datafilters.iteritems():
1624 1632 if cmd.startswith(name):
1625 1633 fn = filterfn
1626 1634 params = cmd[len(name):].lstrip()
1627 1635 break
1628 1636 if not fn:
1629 1637 fn = lambda s, c, **kwargs: procutil.filter(s, c)
1630 1638 # Wrap old filters not supporting keyword arguments
1631 1639 if not pycompat.getargspec(fn)[2]:
1632 1640 oldfn = fn
1633 1641 fn = lambda s, c, **kwargs: oldfn(s, c)
1634 1642 l.append((mf, fn, params))
1635 1643 self._filterpats[filter] = l
1636 1644 return self._filterpats[filter]
1637 1645
1638 1646 def _filter(self, filterpats, filename, data):
1639 1647 for mf, fn, cmd in filterpats:
1640 1648 if mf(filename):
1641 1649 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
1642 1650 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
1643 1651 break
1644 1652
1645 1653 return data
1646 1654
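# The [encode]/[decode] sections consumed by _loadfilter() use the
# standard hgrc filter syntax; a hedged example (the gzip rule is
# illustrative only):
#
#     [encode]
#     *.gz = pipe: gunzip
#
#     [decode]
#     *.gz = pipe: gzip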
1647 1655 @unfilteredpropertycache
1648 1656 def _encodefilterpats(self):
1649 1657 return self._loadfilter('encode')
1650 1658
1651 1659 @unfilteredpropertycache
1652 1660 def _decodefilterpats(self):
1653 1661 return self._loadfilter('decode')
1654 1662
1655 1663 def adddatafilter(self, name, filter):
1656 1664 self._datafilters[name] = filter
1657 1665
1658 1666 def wread(self, filename):
1659 1667 if self.wvfs.islink(filename):
1660 1668 data = self.wvfs.readlink(filename)
1661 1669 else:
1662 1670 data = self.wvfs.read(filename)
1663 1671 return self._filter(self._encodefilterpats, filename, data)
1664 1672
1665 1673 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
1666 1674 """write ``data`` into ``filename`` in the working directory
1667 1675
1668 1676 This returns length of written (maybe decoded) data.
1669 1677 """
1670 1678 data = self._filter(self._decodefilterpats, filename, data)
1671 1679 if 'l' in flags:
1672 1680 self.wvfs.symlink(data, filename)
1673 1681 else:
1674 1682 self.wvfs.write(filename, data, backgroundclose=backgroundclose,
1675 1683 **kwargs)
1676 1684 if 'x' in flags:
1677 1685 self.wvfs.setflags(filename, False, True)
1678 1686 else:
1679 1687 self.wvfs.setflags(filename, False, False)
1680 1688 return len(data)
1681 1689
1682 1690 def wwritedata(self, filename, data):
1683 1691 return self._filter(self._decodefilterpats, filename, data)
1684 1692
1685 1693 def currenttransaction(self):
1686 1694 """return the current transaction or None if non exists"""
1687 1695 if self._transref:
1688 1696 tr = self._transref()
1689 1697 else:
1690 1698 tr = None
1691 1699
1692 1700 if tr and tr.running():
1693 1701 return tr
1694 1702 return None
1695 1703
1696 1704 def transaction(self, desc, report=None):
1697 1705 if (self.ui.configbool('devel', 'all-warnings')
1698 1706 or self.ui.configbool('devel', 'check-locks')):
1699 1707 if self._currentlock(self._lockref) is None:
1700 1708 raise error.ProgrammingError('transaction requires locking')
1701 1709 tr = self.currenttransaction()
1702 1710 if tr is not None:
1703 1711 return tr.nest(name=desc)
1704 1712
1705 1713 # abort here if the journal already exists
1706 1714 if self.svfs.exists("journal"):
1707 1715 raise error.RepoError(
1708 1716 _("abandoned transaction found"),
1709 1717 hint=_("run 'hg recover' to clean up transaction"))
1710 1718
1711 1719 idbase = "%.40f#%f" % (random.random(), time.time())
1712 1720 ha = hex(hashlib.sha1(idbase).digest())
1713 1721 txnid = 'TXN:' + ha
1714 1722 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1715 1723
1716 1724 self._writejournal(desc)
1717 1725 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1718 1726 if report:
1719 1727 rp = report
1720 1728 else:
1721 1729 rp = self.ui.warn
1722 1730 vfsmap = {'plain': self.vfs, 'store': self.svfs} # root of .hg/
1723 1731 # we must avoid cyclic reference between repo and transaction.
1724 1732 reporef = weakref.ref(self)
1725 1733 # Code to track tag movement
1726 1734 #
1727 1735 # Since tags are all handled as file content, it is actually quite hard
1728 1736 # to track these movements from a code perspective. So we fall back to
1729 1737 # tracking at the repository level. One could envision tracking changes
1730 1738 # to the '.hgtags' file through changegroup application, but that fails
1731 1739 # to cope with cases where a transaction exposes new heads without a
1732 1740 # changegroup being involved (eg: phase movement).
1733 1741 #
1734 1742 # For now, we gate the feature behind a flag since this likely comes
1735 1743 # with performance impacts. The current code runs more often than needed
1736 1744 # and does not use caches as much as it could. The current focus is on
1737 1745 # the behavior of the feature so we disable it by default. The flag
1738 1746 # will be removed when we are happy with the performance impact.
1739 1747 #
1740 1748 # Once this feature is no longer experimental move the following
1741 1749 # documentation to the appropriate help section:
1742 1750 #
1743 1751 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
1744 1752 # tags (new or changed or deleted tags). In addition the details of
1745 1753 # these changes are made available in a file at:
1746 1754 # ``REPOROOT/.hg/changes/tags.changes``.
1747 1755 # Make sure you check for HG_TAG_MOVED before reading that file as it
1748 1756 # might exist from a previous transaction even if no tags were touched
1749 1757 # in this one. Changes are recorded in a line-based format::
1750 1758 #
1751 1759 # <action> <hex-node> <tag-name>\n
1752 1760 #
1753 1761 # Actions are defined as follows:
1754 1762 # "-R": tag is removed,
1755 1763 # "+A": tag is added,
1756 1764 # "-M": tag is moved (old value),
1757 1765 # "+M": tag is moved (new value),
1758 1766 tracktags = lambda x: None
1759 1767 # experimental config: experimental.hook-track-tags
1760 1768 shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
1761 1769 if desc != 'strip' and shouldtracktags:
1762 1770 oldheads = self.changelog.headrevs()
1763 1771 def tracktags(tr2):
1764 1772 repo = reporef()
1765 1773 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
1766 1774 newheads = repo.changelog.headrevs()
1767 1775 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
1768 1776 # note: we compare lists here.
1769 1777 # As we do it only once, building a set would not be cheaper
1770 1778 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
1771 1779 if changes:
1772 1780 tr2.hookargs['tag_moved'] = '1'
1773 1781 with repo.vfs('changes/tags.changes', 'w',
1774 1782 atomictemp=True) as changesfile:
1775 1783 # note: we do not register the file with the transaction
1776 1784 # because we need it to still exist when the transaction
1777 1785 # is closed (for txnclose hooks)
1778 1786 tagsmod.writediff(changesfile, changes)
1779 1787 def validate(tr2):
1780 1788 """will run pre-closing hooks"""
1781 1789 # XXX the transaction API is a bit lacking here so we take a hacky
1782 1790 # path for now
1783 1791 #
1784 1792 # We cannot add this as a "pending" hook since the 'tr.hookargs'
1785 1793 # dict is copied before these run. In addition we need the data
1786 1794 # available to in-memory hooks too.
1787 1795 #
1788 1796 # Moreover, we also need to make sure this runs before txnclose
1789 1797 # hooks and there is no "pending" mechanism that would execute
1790 1798 # logic only if hooks are about to run.
1791 1799 #
1792 1800 # Fixing this limitation of the transaction is also needed to track
1793 1801 # other families of changes (bookmarks, phases, obsolescence).
1794 1802 #
1795 1803 # This will have to be fixed before we remove the experimental
1796 1804 # gating.
1797 1805 tracktags(tr2)
1798 1806 repo = reporef()
1799 1807 if repo.ui.configbool('experimental', 'single-head-per-branch'):
1800 1808 scmutil.enforcesinglehead(repo, tr2, desc)
1801 1809 if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
1802 1810 for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
1803 1811 args = tr.hookargs.copy()
1804 1812 args.update(bookmarks.preparehookargs(name, old, new))
1805 1813 repo.hook('pretxnclose-bookmark', throw=True,
1806 1814 txnname=desc,
1807 1815 **pycompat.strkwargs(args))
1808 1816 if hook.hashook(repo.ui, 'pretxnclose-phase'):
1809 1817 cl = repo.unfiltered().changelog
1810 1818 for rev, (old, new) in tr.changes['phases'].items():
1811 1819 args = tr.hookargs.copy()
1812 1820 node = hex(cl.node(rev))
1813 1821 args.update(phases.preparehookargs(node, old, new))
1814 1822 repo.hook('pretxnclose-phase', throw=True, txnname=desc,
1815 1823 **pycompat.strkwargs(args))
1816 1824
1817 1825 repo.hook('pretxnclose', throw=True,
1818 1826 txnname=desc, **pycompat.strkwargs(tr.hookargs))
1819 1827 def releasefn(tr, success):
1820 1828 repo = reporef()
1821 1829 if success:
1822 1830 # this should be explicitly invoked here, because
1823 1831 # in-memory changes aren't written out when closing the
1824 1832 # transaction if tr.addfilegenerator (via
1825 1833 # dirstate.write or so) isn't invoked while the
1826 1834 # transaction is running
1827 1835 repo.dirstate.write(None)
1828 1836 else:
1829 1837 # discard all changes (including ones already written
1830 1838 # out) in this transaction
1831 1839 narrowspec.restorebackup(self, 'journal.narrowspec')
1832 1840 repo.dirstate.restorebackup(None, 'journal.dirstate')
1833 1841
1834 1842 repo.invalidate(clearfilecache=True)
1835 1843
1836 1844 tr = transaction.transaction(rp, self.svfs, vfsmap,
1837 1845 "journal",
1838 1846 "undo",
1839 1847 aftertrans(renames),
1840 1848 self.store.createmode,
1841 1849 validator=validate,
1842 1850 releasefn=releasefn,
1843 1851 checkambigfiles=_cachedfiles,
1844 1852 name=desc)
1845 1853 tr.changes['origrepolen'] = len(self)
1846 1854 tr.changes['obsmarkers'] = set()
1847 1855 tr.changes['phases'] = {}
1848 1856 tr.changes['bookmarks'] = {}
1849 1857
1850 1858 tr.hookargs['txnid'] = txnid
1851 1859 # note: writing the fncache only during finalize means that the file is
1852 1860 # outdated when running hooks. As fncache is used for streaming clones,
1853 1861 # this is not expected to break anything that happens during the hooks.
1854 1862 tr.addfinalize('flush-fncache', self.store.write)
1855 1863 def txnclosehook(tr2):
1856 1864 """To be run if transaction is successful, will schedule a hook run
1857 1865 """
1858 1866 # Don't reference tr2 in hook() so we don't hold a reference.
1859 1867 # This reduces memory consumption when there are multiple
1860 1868 # transactions per lock. This can likely go away if issue5045
1861 1869 # fixes the function accumulation.
1862 1870 hookargs = tr2.hookargs
1863 1871
1864 1872 def hookfunc():
1865 1873 repo = reporef()
1866 1874 if hook.hashook(repo.ui, 'txnclose-bookmark'):
1867 1875 bmchanges = sorted(tr.changes['bookmarks'].items())
1868 1876 for name, (old, new) in bmchanges:
1869 1877 args = tr.hookargs.copy()
1870 1878 args.update(bookmarks.preparehookargs(name, old, new))
1871 1879 repo.hook('txnclose-bookmark', throw=False,
1872 1880 txnname=desc, **pycompat.strkwargs(args))
1873 1881
1874 1882 if hook.hashook(repo.ui, 'txnclose-phase'):
1875 1883 cl = repo.unfiltered().changelog
1876 1884 phasemv = sorted(tr.changes['phases'].items())
1877 1885 for rev, (old, new) in phasemv:
1878 1886 args = tr.hookargs.copy()
1879 1887 node = hex(cl.node(rev))
1880 1888 args.update(phases.preparehookargs(node, old, new))
1881 1889 repo.hook('txnclose-phase', throw=False, txnname=desc,
1882 1890 **pycompat.strkwargs(args))
1883 1891
1884 1892 repo.hook('txnclose', throw=False, txnname=desc,
1885 1893 **pycompat.strkwargs(hookargs))
1886 1894 reporef()._afterlock(hookfunc)
1887 1895 tr.addfinalize('txnclose-hook', txnclosehook)
1888 1896 # Include a leading "-" to make it happen before the transaction summary
1889 1897 # reports registered via scmutil.registersummarycallback() whose names
1890 1898 # are 00-txnreport etc. That way, the caches will be warm when the
1891 1899 # callbacks run.
1892 1900 tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
1893 1901 def txnaborthook(tr2):
1894 1902 """To be run if transaction is aborted
1895 1903 """
1896 1904 reporef().hook('txnabort', throw=False, txnname=desc,
1897 1905 **pycompat.strkwargs(tr2.hookargs))
1898 1906 tr.addabort('txnabort-hook', txnaborthook)
1899 1907 # avoid eager cache invalidation. in-memory data should be identical
1900 1908 # to stored data if transaction has no error.
1901 1909 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1902 1910 self._transref = weakref.ref(tr)
1903 1911 scmutil.registersummarycallback(self, tr, desc)
1904 1912 return tr
1905 1913
1906 1914 def _journalfiles(self):
1907 1915 return ((self.svfs, 'journal'),
1908 1916 (self.vfs, 'journal.dirstate'),
1909 1917 (self.vfs, 'journal.branch'),
1910 1918 (self.vfs, 'journal.desc'),
1911 1919 (self.vfs, 'journal.bookmarks'),
1912 1920 (self.svfs, 'journal.phaseroots'))
1913 1921
1914 1922 def undofiles(self):
1915 1923 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1916 1924
1917 1925 @unfilteredmethod
1918 1926 def _writejournal(self, desc):
1919 1927 self.dirstate.savebackup(None, 'journal.dirstate')
1920 1928 narrowspec.savebackup(self, 'journal.narrowspec')
1921 1929 self.vfs.write("journal.branch",
1922 1930 encoding.fromlocal(self.dirstate.branch()))
1923 1931 self.vfs.write("journal.desc",
1924 1932 "%d\n%s\n" % (len(self), desc))
1925 1933 self.vfs.write("journal.bookmarks",
1926 1934 self.vfs.tryread("bookmarks"))
1927 1935 self.svfs.write("journal.phaseroots",
1928 1936 self.svfs.tryread("phaseroots"))
1929 1937
1930 1938 def recover(self):
1931 1939 with self.lock():
1932 1940 if self.svfs.exists("journal"):
1933 1941 self.ui.status(_("rolling back interrupted transaction\n"))
1934 1942 vfsmap = {'': self.svfs,
1935 1943 'plain': self.vfs,}
1936 1944 transaction.rollback(self.svfs, vfsmap, "journal",
1937 1945 self.ui.warn,
1938 1946 checkambigfiles=_cachedfiles)
1939 1947 self.invalidate()
1940 1948 return True
1941 1949 else:
1942 1950 self.ui.warn(_("no interrupted transaction available\n"))
1943 1951 return False
1944 1952
1945 1953 def rollback(self, dryrun=False, force=False):
1946 1954 wlock = lock = dsguard = None
1947 1955 try:
1948 1956 wlock = self.wlock()
1949 1957 lock = self.lock()
1950 1958 if self.svfs.exists("undo"):
1951 1959 dsguard = dirstateguard.dirstateguard(self, 'rollback')
1952 1960
1953 1961 return self._rollback(dryrun, force, dsguard)
1954 1962 else:
1955 1963 self.ui.warn(_("no rollback information available\n"))
1956 1964 return 1
1957 1965 finally:
1958 1966 release(dsguard, lock, wlock)
1959 1967
1960 1968 @unfilteredmethod # Until we get smarter cache management
1961 1969 def _rollback(self, dryrun, force, dsguard):
1962 1970 ui = self.ui
1963 1971 try:
1964 1972 args = self.vfs.read('undo.desc').splitlines()
1965 1973 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1966 1974 if len(args) >= 3:
1967 1975 detail = args[2]
1968 1976 oldtip = oldlen - 1
1969 1977
1970 1978 if detail and ui.verbose:
1971 1979 msg = (_('repository tip rolled back to revision %d'
1972 1980 ' (undo %s: %s)\n')
1973 1981 % (oldtip, desc, detail))
1974 1982 else:
1975 1983 msg = (_('repository tip rolled back to revision %d'
1976 1984 ' (undo %s)\n')
1977 1985 % (oldtip, desc))
1978 1986 except IOError:
1979 1987 msg = _('rolling back unknown transaction\n')
1980 1988 desc = None
1981 1989
1982 1990 if not force and self['.'] != self['tip'] and desc == 'commit':
1983 1991 raise error.Abort(
1984 1992 _('rollback of last commit while not checked out '
1985 1993 'may lose data'), hint=_('use -f to force'))
1986 1994
1987 1995 ui.status(msg)
1988 1996 if dryrun:
1989 1997 return 0
1990 1998
1991 1999 parents = self.dirstate.parents()
1992 2000 self.destroying()
1993 2001 vfsmap = {'plain': self.vfs, '': self.svfs}
1994 2002 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
1995 2003 checkambigfiles=_cachedfiles)
1996 2004 if self.vfs.exists('undo.bookmarks'):
1997 2005 self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
1998 2006 if self.svfs.exists('undo.phaseroots'):
1999 2007 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
2000 2008 self.invalidate()
2001 2009
2002 2010 parentgone = (parents[0] not in self.changelog.nodemap or
2003 2011 parents[1] not in self.changelog.nodemap)
2004 2012 if parentgone:
2005 2013 # prevent dirstateguard from overwriting already restored one
2006 2014 dsguard.close()
2007 2015
2008 2016 narrowspec.restorebackup(self, 'undo.narrowspec')
2009 2017 self.dirstate.restorebackup(None, 'undo.dirstate')
2010 2018 try:
2011 2019 branch = self.vfs.read('undo.branch')
2012 2020 self.dirstate.setbranch(encoding.tolocal(branch))
2013 2021 except IOError:
2014 2022 ui.warn(_('named branch could not be reset: '
2015 2023 'current branch is still \'%s\'\n')
2016 2024 % self.dirstate.branch())
2017 2025
2018 2026 parents = tuple([p.rev() for p in self[None].parents()])
2019 2027 if len(parents) > 1:
2020 2028 ui.status(_('working directory now based on '
2021 2029 'revisions %d and %d\n') % parents)
2022 2030 else:
2023 2031 ui.status(_('working directory now based on '
2024 2032 'revision %d\n') % parents)
2025 2033 mergemod.mergestate.clean(self, self['.'].node())
2026 2034
2027 2035 # TODO: if we know which new heads may result from this rollback, pass
2028 2036 # them to destroy(), which will prevent the branchhead cache from being
2029 2037 # invalidated.
2030 2038 self.destroyed()
2031 2039 return 0
2032 2040
2033 2041 def _buildcacheupdater(self, newtransaction):
2034 2042 """called during transaction to build the callback updating cache
2035 2043
2036 2044 Lives on the repository to help extension who might want to augment
2037 2045 this logic. For this purpose, the created transaction is passed to the
2038 2046 method.
2039 2047 """
2040 2048 # we must avoid cyclic reference between repo and transaction.
2041 2049 reporef = weakref.ref(self)
2042 2050 def updater(tr):
2043 2051 repo = reporef()
2044 2052 repo.updatecaches(tr)
2045 2053 return updater
2046 2054
2047 2055 @unfilteredmethod
2048 2056 def updatecaches(self, tr=None, full=False):
2049 2057 """warm appropriate caches
2050 2058
2051 2059 If this function is called after a transaction has closed, the transaction
2052 2060 will be available in the 'tr' argument. This can be used to selectively
2053 2061 update caches relevant to the changes in that transaction.
2054 2062
2055 2063 If 'full' is set, make sure all caches the function knows about have
2056 2064 up-to-date data. Even the ones usually loaded more lazily.
2057 2065 """
2058 2066 if tr is not None and tr.hookargs.get('source') == 'strip':
2059 2067 # During strip, many caches are invalid but
2060 2068 # later call to `destroyed` will refresh them.
2061 2069 return
2062 2070
2063 2071 if tr is None or tr.changes['origrepolen'] < len(self):
2064 2072 # updating the unfiltered branchmap should refresh all the others,
2065 2073 self.ui.debug('updating the branch cache\n')
2066 2074 branchmap.updatecache(self.filtered('served'))
2067 2075
2068 2076 if full:
2069 2077 rbc = self.revbranchcache()
2070 2078 for r in self.changelog:
2071 2079 rbc.branchinfo(r)
2072 2080 rbc.write()
2073 2081
2074 2082 # ensure the working copy parents are in the manifestfulltextcache
2075 2083 for ctx in self['.'].parents():
2076 2084 ctx.manifest() # accessing the manifest is enough
2077 2085
2078 2086 def invalidatecaches(self):
2079 2087
2080 2088 if r'_tagscache' in vars(self):
2081 2089 # can't use delattr on proxy
2082 2090 del self.__dict__[r'_tagscache']
2083 2091
2084 2092 self.unfiltered()._branchcaches.clear()
2085 2093 self.invalidatevolatilesets()
2086 2094 self._sparsesignaturecache.clear()
2087 2095
2088 2096 def invalidatevolatilesets(self):
2089 2097 self.filteredrevcache.clear()
2090 2098 obsolete.clearobscaches(self)
2091 2099
2092 2100 def invalidatedirstate(self):
2093 2101 '''Invalidates the dirstate, causing the next call to dirstate
2094 2102 to check if it was modified since the last time it was read,
2095 2103 rereading it if it has.
2096 2104
2097 2105 This is different from dirstate.invalidate() in that it doesn't always
2098 2106 reread the dirstate. Use dirstate.invalidate() if you want to
2099 2107 explicitly read the dirstate again (i.e. restoring it to a previous
2100 2108 known good state).'''
2101 2109 if hasunfilteredcache(self, r'dirstate'):
2102 2110 for k in self.dirstate._filecache:
2103 2111 try:
2104 2112 delattr(self.dirstate, k)
2105 2113 except AttributeError:
2106 2114 pass
2107 2115 delattr(self.unfiltered(), r'dirstate')
2108 2116
2109 2117 def invalidate(self, clearfilecache=False):
2110 2118 '''Invalidates both store and non-store parts other than dirstate
2111 2119
2112 2120 If a transaction is running, invalidation of store is omitted,
2113 2121 because discarding in-memory changes might cause inconsistency
2114 2122 (e.g. incomplete fncache causes unintentional failure, but
2115 2123 redundant one doesn't).
2116 2124 '''
2117 2125 unfiltered = self.unfiltered() # all file caches are stored unfiltered
2118 2126 for k in list(self._filecache.keys()):
2119 2127 # dirstate is invalidated separately in invalidatedirstate()
2120 2128 if k == 'dirstate':
2121 2129 continue
2122 2130 if (k == 'changelog' and
2123 2131 self.currenttransaction() and
2124 2132 self.changelog._delayed):
2125 2133 # The changelog object may store unwritten revisions. We don't
2126 2134 # want to lose them.
2127 2135 # TODO: Solve the problem instead of working around it.
2128 2136 continue
2129 2137
2130 2138 if clearfilecache:
2131 2139 del self._filecache[k]
2132 2140 try:
2133 2141 delattr(unfiltered, k)
2134 2142 except AttributeError:
2135 2143 pass
2136 2144 self.invalidatecaches()
2137 2145 if not self.currenttransaction():
2138 2146 # TODO: Changing contents of store outside transaction
2139 2147 # causes inconsistency. We should make in-memory store
2140 2148 # changes detectable, and abort if changed.
2141 2149 self.store.invalidatecaches()
2142 2150
2143 2151 def invalidateall(self):
2144 2152 '''Fully invalidates both store and non-store parts, causing the
2145 2153 subsequent operation to reread any outside changes.'''
2146 2154 # extension should hook this to invalidate its caches
2147 2155 self.invalidate()
2148 2156 self.invalidatedirstate()
2149 2157
2150 2158 @unfilteredmethod
2151 2159 def _refreshfilecachestats(self, tr):
2152 2160 """Reload stats of cached files so that they are flagged as valid"""
2153 2161 for k, ce in self._filecache.items():
2154 2162 k = pycompat.sysstr(k)
2155 2163 if k == r'dirstate' or k not in self.__dict__:
2156 2164 continue
2157 2165 ce.refresh()
2158 2166
2159 2167 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
2160 2168 inheritchecker=None, parentenvvar=None):
2161 2169 parentlock = None
2162 2170 # the contents of parentenvvar are used by the underlying lock to
2163 2171 # determine whether it can be inherited
2164 2172 if parentenvvar is not None:
2165 2173 parentlock = encoding.environ.get(parentenvvar)
2166 2174
2167 2175 timeout = 0
2168 2176 warntimeout = 0
2169 2177 if wait:
2170 2178 timeout = self.ui.configint("ui", "timeout")
2171 2179 warntimeout = self.ui.configint("ui", "timeout.warn")
2172 2180 # internal config: ui.signal-safe-lock
2173 2181 signalsafe = self.ui.configbool('ui', 'signal-safe-lock')
2174 2182
2175 2183 l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
2176 2184 releasefn=releasefn,
2177 2185 acquirefn=acquirefn, desc=desc,
2178 2186 inheritchecker=inheritchecker,
2179 2187 parentlock=parentlock,
2180 2188 signalsafe=signalsafe)
2181 2189 return l
2182 2190
2183 2191 def _afterlock(self, callback):
2184 2192 """add a callback to be run when the repository is fully unlocked
2185 2193
2186 2194 The callback will be executed when the outermost lock is released
2187 2195 (with wlock being higher level than 'lock')."""
2188 2196 for ref in (self._wlockref, self._lockref):
2189 2197 l = ref and ref()
2190 2198 if l and l.held:
2191 2199 l.postrelease.append(callback)
2192 2200 break
2193 2201 else: # no lock has been found.
2194 2202 callback()
2195 2203
2196 2204 def lock(self, wait=True):
2197 2205 '''Lock the repository store (.hg/store) and return a weak reference
2198 2206 to the lock. Use this before modifying the store (e.g. committing or
2199 2207 stripping). If you are opening a transaction, get a lock as well.
2200 2208
2201 2209 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2202 2210 'wlock' first to avoid a dead-lock hazard.'''
2203 2211 l = self._currentlock(self._lockref)
2204 2212 if l is not None:
2205 2213 l.lock()
2206 2214 return l
2207 2215
2208 2216 l = self._lock(self.svfs, "lock", wait, None,
2209 2217 self.invalidate, _('repository %s') % self.origroot)
2210 2218 self._lockref = weakref.ref(l)
2211 2219 return l
2212 2220
2213 2221 def _wlockchecktransaction(self):
2214 2222 if self.currenttransaction() is not None:
2215 2223 raise error.LockInheritanceContractViolation(
2216 2224 'wlock cannot be inherited in the middle of a transaction')
2217 2225
2218 2226 def wlock(self, wait=True):
2219 2227 '''Lock the non-store parts of the repository (everything under
2220 2228 .hg except .hg/store) and return a weak reference to the lock.
2221 2229
2222 2230 Use this before modifying files in .hg.
2223 2231
2224 2232 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
2225 2233 'wlock' first to avoid a dead-lock hazard.'''
2226 2234 l = self._wlockref and self._wlockref()
2227 2235 if l is not None and l.held:
2228 2236 l.lock()
2229 2237 return l
2230 2238
2231 2239 # We do not need to check for non-waiting lock acquisition. Such
2232 2240 # acquisition would not cause a dead-lock, as it would just fail.
2233 2241 if wait and (self.ui.configbool('devel', 'all-warnings')
2234 2242 or self.ui.configbool('devel', 'check-locks')):
2235 2243 if self._currentlock(self._lockref) is not None:
2236 2244 self.ui.develwarn('"wlock" acquired after "lock"')
2237 2245
2238 2246 def unlock():
2239 2247 if self.dirstate.pendingparentchange():
2240 2248 self.dirstate.invalidate()
2241 2249 else:
2242 2250 self.dirstate.write(None)
2243 2251
2244 2252 self._filecache['dirstate'].refresh()
2245 2253
2246 2254 l = self._lock(self.vfs, "wlock", wait, unlock,
2247 2255 self.invalidatedirstate, _('working directory of %s') %
2248 2256 self.origroot,
2249 2257 inheritchecker=self._wlockchecktransaction,
2250 2258 parentenvvar='HG_WLOCK_LOCKER')
2251 2259 self._wlockref = weakref.ref(l)
2252 2260 return l
2253 2261
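# A hedged sketch of the documented acquisition order: wlock before
# lock, with any transaction opened innermost ('example' is an assumed
# transaction description):
#
#     with repo.wlock():
#         with repo.lock():
#             with repo.transaction('example') as tr:
#                 ...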
2254 2262 def _currentlock(self, lockref):
2255 2263 """Returns the lock if it's held, or None if it's not."""
2256 2264 if lockref is None:
2257 2265 return None
2258 2266 l = lockref()
2259 2267 if l is None or not l.held:
2260 2268 return None
2261 2269 return l
2262 2270
2263 2271 def currentwlock(self):
2264 2272 """Returns the wlock if it's held, or None if it's not."""
2265 2273 return self._currentlock(self._wlockref)
2266 2274
2267 2275 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
2268 2276 """
2269 2277 commit an individual file as part of a larger transaction
2270 2278 """
2271 2279
2272 2280 fname = fctx.path()
2273 2281 fparent1 = manifest1.get(fname, nullid)
2274 2282 fparent2 = manifest2.get(fname, nullid)
2275 2283 if isinstance(fctx, context.filectx):
2276 2284 node = fctx.filenode()
2277 2285 if node in [fparent1, fparent2]:
2278 2286 self.ui.debug('reusing %s filelog entry\n' % fname)
2279 2287 if manifest1.flags(fname) != fctx.flags():
2280 2288 changelist.append(fname)
2281 2289 return node
2282 2290
2283 2291 flog = self.file(fname)
2284 2292 meta = {}
2285 2293 copy = fctx.renamed()
2286 2294 if copy and copy[0] != fname:
2287 2295 # Mark the new revision of this file as a copy of another
2288 2296 # file. This copy data will effectively act as a parent
2289 2297 # of this new revision. If this is a merge, the first
2290 2298 # parent will be the nullid (meaning "look up the copy data")
2291 2299 # and the second one will be the other parent. For example:
2292 2300 #
2293 2301 # 0 --- 1 --- 3 rev1 changes file foo
2294 2302 # \ / rev2 renames foo to bar and changes it
2295 2303 # \- 2 -/ rev3 should have bar with all changes and
2296 2304 # should record that bar descends from
2297 2305 # bar in rev2 and foo in rev1
2298 2306 #
2299 2307 # this allows this merge to succeed:
2300 2308 #
2301 2309 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
2302 2310 # \ / merging rev3 and rev4 should use bar@rev2
2303 2311 # \- 2 --- 4 as the merge base
2304 2312 #
2305 2313
2306 2314 cfname = copy[0]
2307 2315 crev = manifest1.get(cfname)
2308 2316 newfparent = fparent2
2309 2317
2310 2318 if manifest2: # branch merge
2311 2319 if fparent2 == nullid or crev is None: # copied on remote side
2312 2320 if cfname in manifest2:
2313 2321 crev = manifest2[cfname]
2314 2322 newfparent = fparent1
2315 2323
2316 2324 # Here, we used to search backwards through history to try to find
2317 2325 # where the file copy came from if the source of a copy was not in
2318 2326 # the parent directory. However, this doesn't actually make sense to
2319 2327 # do (what does a copy from something not in your working copy even
2320 2328 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
2321 2329 # the user that copy information was dropped, so if they didn't
2322 2330 # expect this outcome it can be fixed, but this is the correct
2323 2331 # behavior in this circumstance.
2324 2332
2325 2333 if crev:
2326 2334 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
2327 2335 meta["copy"] = cfname
2328 2336 meta["copyrev"] = hex(crev)
2329 2337 fparent1, fparent2 = nullid, newfparent
2330 2338 else:
2331 2339 self.ui.warn(_("warning: can't find ancestor for '%s' "
2332 2340 "copied from '%s'!\n") % (fname, cfname))
2333 2341
2334 2342 elif fparent1 == nullid:
2335 2343 fparent1, fparent2 = fparent2, nullid
2336 2344 elif fparent2 != nullid:
2337 2345 # is one parent an ancestor of the other?
2338 2346 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
2339 2347 if fparent1 in fparentancestors:
2340 2348 fparent1, fparent2 = fparent2, nullid
2341 2349 elif fparent2 in fparentancestors:
2342 2350 fparent2 = nullid
2343 2351
2344 2352 # is the file changed?
2345 2353 text = fctx.data()
2346 2354 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
2347 2355 changelist.append(fname)
2348 2356 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
2349 2357 # are just the flags changed during merge?
2350 2358 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
2351 2359 changelist.append(fname)
2352 2360
2353 2361 return fparent1
2354 2362
2355 2363 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
2356 2364 """check for commit arguments that aren't committable"""
2357 2365 if match.isexact() or match.prefix():
2358 2366 matched = set(status.modified + status.added + status.removed)
2359 2367
2360 2368 for f in match.files():
2361 2369 f = self.dirstate.normalize(f)
2362 2370 if f == '.' or f in matched or f in wctx.substate:
2363 2371 continue
2364 2372 if f in status.deleted:
2365 2373 fail(f, _('file not found!'))
2366 2374 if f in vdirs: # visited directory
2367 2375 d = f + '/'
2368 2376 for mf in matched:
2369 2377 if mf.startswith(d):
2370 2378 break
2371 2379 else:
2372 2380 fail(f, _("no match under directory!"))
2373 2381 elif f not in self.dirstate:
2374 2382 fail(f, _("file not tracked!"))
2375 2383
2376 2384 @unfilteredmethod
2377 2385 def commit(self, text="", user=None, date=None, match=None, force=False,
2378 2386 editor=False, extra=None):
2379 2387 """Add a new revision to current repository.
2380 2388
2381 2389 Revision information is gathered from the working directory,
2382 2390 match can be used to filter the committed files. If editor is
2383 2391 supplied, it is called to get a commit message.
2384 2392 """
2385 2393 if extra is None:
2386 2394 extra = {}
2387 2395
2388 2396 def fail(f, msg):
2389 2397 raise error.Abort('%s: %s' % (f, msg))
2390 2398
2391 2399 if not match:
2392 2400 match = matchmod.always(self.root, '')
2393 2401
2394 2402 if not force:
2395 2403 vdirs = []
2396 2404 match.explicitdir = vdirs.append
2397 2405 match.bad = fail
2398 2406
2399 2407 wlock = lock = tr = None
2400 2408 try:
2401 2409 wlock = self.wlock()
2402 2410 lock = self.lock() # for recent changelog (see issue4368)
2403 2411
2404 2412 wctx = self[None]
2405 2413 merge = len(wctx.parents()) > 1
2406 2414
2407 2415 if not force and merge and not match.always():
2408 2416 raise error.Abort(_('cannot partially commit a merge '
2409 2417 '(do not specify files or patterns)'))
2410 2418
2411 2419 status = self.status(match=match, clean=force)
2412 2420 if force:
2413 2421 status.modified.extend(status.clean) # mq may commit clean files
2414 2422
2415 2423 # check subrepos
2416 2424 subs, commitsubs, newstate = subrepoutil.precommit(
2417 2425 self.ui, wctx, status, match, force=force)
2418 2426
2419 2427 # make sure all explicit patterns are matched
2420 2428 if not force:
2421 2429 self.checkcommitpatterns(wctx, vdirs, match, status, fail)
2422 2430
2423 2431 cctx = context.workingcommitctx(self, status,
2424 2432 text, user, date, extra)
2425 2433
2426 2434 # internal config: ui.allowemptycommit
2427 2435 allowemptycommit = (wctx.branch() != wctx.p1().branch()
2428 2436 or extra.get('close') or merge or cctx.files()
2429 2437 or self.ui.configbool('ui', 'allowemptycommit'))
2430 2438 if not allowemptycommit:
2431 2439 return None
2432 2440
2433 2441 if merge and cctx.deleted():
2434 2442 raise error.Abort(_("cannot commit merge with missing files"))
2435 2443
2436 2444 ms = mergemod.mergestate.read(self)
2437 2445 mergeutil.checkunresolved(ms)
2438 2446
2439 2447 if editor:
2440 2448 cctx._text = editor(self, cctx, subs)
2441 2449 edited = (text != cctx._text)
2442 2450
2443 2451 # Save commit message in case this transaction gets rolled back
2444 2452 # (e.g. by a pretxncommit hook). Leave the content alone on
2445 2453 # the assumption that the user will use the same editor again.
2446 2454 msgfn = self.savecommitmessage(cctx._text)
2447 2455
2448 2456 # commit subs and write new state
2449 2457 if subs:
2450 2458 for s in sorted(commitsubs):
2451 2459 sub = wctx.sub(s)
2452 2460 self.ui.status(_('committing subrepository %s\n') %
2453 2461 subrepoutil.subrelpath(sub))
2454 2462 sr = sub.commit(cctx._text, user, date)
2455 2463 newstate[s] = (newstate[s][0], sr)
2456 2464 subrepoutil.writestate(self, newstate)
2457 2465
2458 2466 p1, p2 = self.dirstate.parents()
2459 2467 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
2460 2468 try:
2461 2469 self.hook("precommit", throw=True, parent1=hookp1,
2462 2470 parent2=hookp2)
2463 2471 tr = self.transaction('commit')
2464 2472 ret = self.commitctx(cctx, True)
2465 2473 except: # re-raises
2466 2474 if edited:
2467 2475 self.ui.write(
2468 2476 _('note: commit message saved in %s\n') % msgfn)
2469 2477 raise
2470 2478 # update bookmarks, dirstate and mergestate
2471 2479 bookmarks.update(self, [p1, p2], ret)
2472 2480 cctx.markcommitted(ret)
2473 2481 ms.reset()
2474 2482 tr.close()
2475 2483
2476 2484 finally:
2477 2485 lockmod.release(tr, lock, wlock)
2478 2486
2479 2487 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
2480 2488 # hack for command that use a temporary commit (eg: histedit)
2481 2489 # temporary commit got stripped before hook release
2482 2490 if self.changelog.hasnode(ret):
2483 2491 self.hook("commit", node=node, parent1=parent1,
2484 2492 parent2=parent2)
2485 2493 self._afterlock(commithook)
2486 2494 return ret
2487 2495
2488 2496 @unfilteredmethod
2489 2497 def commitctx(self, ctx, error=False):
2490 2498 """Add a new revision to current repository.
2491 2499 Revision information is passed via the context argument.
2492 2500
2493 2501 ctx.files() should list all files involved in this commit, i.e.
2494 2502 modified/added/removed files. On merge, it may be wider than the
2495 2503 ctx.files() to be committed, since any file nodes derived directly
2496 2504 from p1 or p2 are excluded from the committed ctx.files().
2497 2505 """
2498 2506
2499 2507 tr = None
2500 2508 p1, p2 = ctx.p1(), ctx.p2()
2501 2509 user = ctx.user()
2502 2510
2503 2511 lock = self.lock()
2504 2512 try:
2505 2513 tr = self.transaction("commit")
2506 2514 trp = weakref.proxy(tr)
2507 2515
2508 2516 if ctx.manifestnode():
2509 2517 # reuse an existing manifest revision
2510 2518 self.ui.debug('reusing known manifest\n')
2511 2519 mn = ctx.manifestnode()
2512 2520 files = ctx.files()
2513 2521 elif ctx.files():
2514 2522 m1ctx = p1.manifestctx()
2515 2523 m2ctx = p2.manifestctx()
2516 2524 mctx = m1ctx.copy()
2517 2525
2518 2526 m = mctx.read()
2519 2527 m1 = m1ctx.read()
2520 2528 m2 = m2ctx.read()
2521 2529
2522 2530 # check in files
2523 2531 added = []
2524 2532 changed = []
2525 2533 removed = list(ctx.removed())
2526 2534 linkrev = len(self)
2527 2535 self.ui.note(_("committing files:\n"))
2528 2536 for f in sorted(ctx.modified() + ctx.added()):
2529 2537 self.ui.note(f + "\n")
2530 2538 try:
2531 2539 fctx = ctx[f]
2532 2540 if fctx is None:
2533 2541 removed.append(f)
2534 2542 else:
2535 2543 added.append(f)
2536 2544 m[f] = self._filecommit(fctx, m1, m2, linkrev,
2537 2545 trp, changed)
2538 2546 m.setflag(f, fctx.flags())
2539 2547 except OSError as inst:
2540 2548 self.ui.warn(_("trouble committing %s!\n") % f)
2541 2549 raise
2542 2550 except IOError as inst:
2543 2551 errcode = getattr(inst, 'errno', errno.ENOENT)
2544 2552 if error or errcode and errcode != errno.ENOENT:
2545 2553 self.ui.warn(_("trouble committing %s!\n") % f)
2546 2554 raise
2547 2555
2548 2556 # update manifest
2549 2557 removed = [f for f in sorted(removed) if f in m1 or f in m2]
2550 2558 drop = [f for f in removed if f in m]
2551 2559 for f in drop:
2552 2560 del m[f]
2553 2561 files = changed + removed
2554 2562 md = None
2555 2563 if not files:
2556 2564 # if no "files" actually changed in terms of the changelog,
2557 2565 # try hard to detect an unmodified manifest entry so that the
2558 2566 # exact same commit can be reproduced later on by convert.
2559 2567 md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
2560 2568 if not files and md:
2561 2569 self.ui.debug('not reusing manifest (no file change in '
2562 2570 'changelog, but manifest differs)\n')
2563 2571 if files or md:
2564 2572 self.ui.note(_("committing manifest\n"))
2565 2573 # we're using narrowmatch here since it's already applied at
2566 2574 # other stages (such as dirstate.walk), so we're already
2567 2575 # ignoring things outside of narrowspec in most cases. The
2568 2576 # one case where we might have files outside the narrowspec
2569 2577 # at this point is merges, and we already error out in the
2570 2578 # case where the merge has files outside of the narrowspec,
2571 2579 # so this is safe.
2572 2580 mn = mctx.write(trp, linkrev,
2573 2581 p1.manifestnode(), p2.manifestnode(),
2574 2582 added, drop, match=self.narrowmatch())
2575 2583 else:
2576 2584 self.ui.debug('reusing manifest from p1 (listed files '
2577 2585 'actually unchanged)\n')
2578 2586 mn = p1.manifestnode()
2579 2587 else:
2580 2588 self.ui.debug('reusing manifest from p1 (no file change)\n')
2581 2589 mn = p1.manifestnode()
2582 2590 files = []
2583 2591
2584 2592 # update changelog
2585 2593 self.ui.note(_("committing changelog\n"))
2586 2594 self.changelog.delayupdate(tr)
2587 2595 n = self.changelog.add(mn, files, ctx.description(),
2588 2596 trp, p1.node(), p2.node(),
2589 2597 user, ctx.date(), ctx.extra().copy())
2590 2598 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
2591 2599 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
2592 2600 parent2=xp2)
2593 2601 # set the proper phase on the new commit
2594 2602 targetphase = subrepoutil.newcommitphase(self.ui, ctx)
2595 2603 if targetphase:
2596 2604 # retracting the boundary does not alter parent changesets.
2597 2605 # if a parent has a higher phase, the resulting phase will
2598 2606 # be compliant anyway
2599 2607 #
2600 2608 # if the minimal phase was 0 we don't need to retract anything
2601 2609 phases.registernew(self, tr, targetphase, [n])
2602 2610 tr.close()
2603 2611 return n
2604 2612 finally:
2605 2613 if tr:
2606 2614 tr.release()
2607 2615 lock.release()
2608 2616
2609 2617 @unfilteredmethod
2610 2618 def destroying(self):
2611 2619 '''Inform the repository that nodes are about to be destroyed.
2612 2620 Intended for use by strip and rollback, so there's a common
2613 2621 place for anything that has to be done before destroying history.
2614 2622
2615 2623 This is mostly useful for saving state that is in memory and waiting
2616 2624 to be flushed when the current lock is released. Because a call to
2617 2625 destroyed is imminent, the repo will be invalidated causing those
2618 2626 changes to stay in memory (waiting for the next unlock), or vanish
2619 2627 completely.
2620 2628 '''
2621 2629 # When using the same lock to commit and strip, the phasecache is left
2622 2630 # dirty after committing. Then when we strip, the repo is invalidated,
2623 2631 # causing those changes to disappear.
2624 2632 if '_phasecache' in vars(self):
2625 2633 self._phasecache.write()
2626 2634
2627 2635 @unfilteredmethod
2628 2636 def destroyed(self):
2629 2637 '''Inform the repository that nodes have been destroyed.
2630 2638 Intended for use by strip and rollback, so there's a common
2631 2639 place for anything that has to be done after destroying history.
2632 2640 '''
2633 2641 # When one tries to:
2634 2642 # 1) destroy nodes thus calling this method (e.g. strip)
2635 2643 # 2) use phasecache somewhere (e.g. commit)
2636 2644 #
2637 2645 # then 2) will fail because the phasecache contains nodes that were
2638 2646 # removed. We can either remove phasecache from the filecache,
2639 2647 # causing it to reload next time it is accessed, or simply filter
2640 2648 # the removed nodes now and write the updated cache.
2641 2649 self._phasecache.filterunknown(self)
2642 2650 self._phasecache.write()
2643 2651
2644 2652 # refresh all repository caches
2645 2653 self.updatecaches()
2646 2654
2647 2655 # Ensure the persistent tag cache is updated. Doing it now
2648 2656 # means that the tag cache only has to worry about destroyed
2649 2657 # heads immediately after a strip/rollback. That in turn
2650 2658 # guarantees that "cachetip == currenttip" (comparing both rev
2651 2659 # and node) always means no nodes have been added or destroyed.
2652 2660
2653 2661 # XXX this is suboptimal when qrefresh'ing: we strip the current
2654 2662 # head, refresh the tag cache, then immediately add a new head.
2655 2663 # But I think doing it this way is necessary for the "instant
2656 2664 # tag cache retrieval" case to work.
2657 2665 self.invalidate()
2658 2666
2659 2667 def status(self, node1='.', node2=None, match=None,
2660 2668 ignored=False, clean=False, unknown=False,
2661 2669 listsubrepos=False):
2662 2670 '''a convenience method that calls node1.status(node2)'''
2663 2671 return self[node1].status(node2, match, ignored, clean, unknown,
2664 2672 listsubrepos)
2665 2673
2666 2674 def addpostdsstatus(self, ps):
2667 2675 """Add a callback to run within the wlock, at the point at which status
2668 2676 fixups happen.
2669 2677
2670 2678 On status completion, callback(wctx, status) will be called with the
2671 2679 wlock held, unless the dirstate has changed from underneath or the wlock
2672 2680 couldn't be grabbed.
2673 2681
2674 2682 Callbacks should not capture and use a cached copy of the dirstate --
2675 2683 it might change in the meantime. Instead, they should access the
2676 2684 dirstate via wctx.repo().dirstate.
2677 2685
2678 2686 This list is emptied out after each status run -- extensions should
2679 2687 make sure they add to this list each time dirstate.status is called.
2680 2688 Extensions should also make sure they don't call this for statuses
2681 2689 that don't involve the dirstate.
2682 2690 """
2683 2691
2684 2692 # The list is located here for uniqueness reasons -- it is actually
2685 2693 # managed by the workingctx, but that isn't unique per-repo.
2686 2694 self._postdsstatus.append(ps)
2687 2695
2688 2696 def postdsstatus(self):
2689 2697 """Used by workingctx to get the list of post-dirstate-status hooks."""
2690 2698 return self._postdsstatus
2691 2699
2692 2700 def clearpostdsstatus(self):
2693 2701 """Used by workingctx to clear post-dirstate-status hooks."""
2694 2702 del self._postdsstatus[:]
2695 2703
2696 2704 def heads(self, start=None):
2697 2705 if start is None:
2698 2706 cl = self.changelog
2699 2707 headrevs = reversed(cl.headrevs())
2700 2708 return [cl.node(rev) for rev in headrevs]
2701 2709
2702 2710 heads = self.changelog.heads(start)
2703 2711 # sort the output in rev descending order
2704 2712 return sorted(heads, key=self.changelog.rev, reverse=True)
2705 2713
2706 2714 def branchheads(self, branch=None, start=None, closed=False):
2707 2715 '''return a (possibly filtered) list of heads for the given branch
2708 2716
2709 2717 Heads are returned in topological order, from newest to oldest.
2710 2718 If branch is None, use the dirstate branch.
2711 2719 If start is not None, return only heads reachable from start.
2712 2720 If closed is True, return heads that are marked as closed as well.
2713 2721 '''
2714 2722 if branch is None:
2715 2723 branch = self[None].branch()
2716 2724 branches = self.branchmap()
2717 2725 if branch not in branches:
2718 2726 return []
2719 2727 # the cache returns heads ordered lowest to highest
2720 2728 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
2721 2729 if start is not None:
2722 2730 # filter out the heads that cannot be reached from startrev
2723 2731 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
2724 2732 bheads = [h for h in bheads if h in fbheads]
2725 2733 return bheads
2726 2734
2727 2735 def branches(self, nodes):
2728 2736 if not nodes:
2729 2737 nodes = [self.changelog.tip()]
2730 2738 b = []
2731 2739 for n in nodes:
2732 2740 t = n
2733 2741 while True:
2734 2742 p = self.changelog.parents(n)
2735 2743 if p[1] != nullid or p[0] == nullid:
2736 2744 b.append((t, n, p[0], p[1]))
2737 2745 break
2738 2746 n = p[0]
2739 2747 return b
2740 2748
2741 2749 def between(self, pairs):
2742 2750 r = []
2743 2751
2744 2752 for top, bottom in pairs:
2745 2753 n, l, i = top, [], 0
2746 2754 f = 1
2747 2755
2748 2756 while n != bottom and n != nullid:
2749 2757 p = self.changelog.parents(n)[0]
2750 2758 if i == f:
2751 2759 l.append(n)
2752 2760 f = f * 2
2753 2761 n = p
2754 2762 i += 1
2755 2763
2756 2764 r.append(l)
2757 2765
2758 2766 return r
2759 2767
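Note on the sampling loop above: for each (top, bottom) pair it walks the first-parent chain and keeps only the nodes whose distance from top is a power of two (1, 2, 4, 8, ...), which bounds the size of the reply on long chains. A standalone sketch of the same loop, assuming a caller-supplied parent(node) lookup (hypothetical; the real method uses changelog.parents() and also stops at nullid):

    def between_one(parent, top, bottom):
        # walk first parents from top toward bottom, recording every
        # node whose distance from top is a power of two
        n, sampled = top, []
        i, f = 0, 1
        while n != bottom and n is not None:
            if i == f:
                sampled.append(n)
                f *= 2
            n = parent(n)
            i += 1
        return sampled

For example, with parent = lambda n: n - 1 if n else None, between_one(parent, 10, 0) returns [9, 8, 6, 2].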
2760 2768 def checkpush(self, pushop):
2761 2769 """Extensions can override this function if additional checks have
2762 2770 to be performed before pushing, or call it if they override push
2763 2771 command.
2764 2772 """
2765 2773
2766 2774 @unfilteredpropertycache
2767 2775 def prepushoutgoinghooks(self):
2768 2776 """Return util.hooks consists of a pushop with repo, remote, outgoing
2769 2777 methods, which are called before pushing changesets.
2770 2778 """
2771 2779 return util.hooks()
2772 2780
2773 2781 def pushkey(self, namespace, key, old, new):
2774 2782 try:
2775 2783 tr = self.currenttransaction()
2776 2784 hookargs = {}
2777 2785 if tr is not None:
2778 2786 hookargs.update(tr.hookargs)
2779 2787 hookargs = pycompat.strkwargs(hookargs)
2780 2788 hookargs[r'namespace'] = namespace
2781 2789 hookargs[r'key'] = key
2782 2790 hookargs[r'old'] = old
2783 2791 hookargs[r'new'] = new
2784 2792 self.hook('prepushkey', throw=True, **hookargs)
2785 2793 except error.HookAbort as exc:
2786 2794 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
2787 2795 if exc.hint:
2788 2796 self.ui.write_err(_("(%s)\n") % exc.hint)
2789 2797 return False
2790 2798 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2791 2799 ret = pushkey.push(self, namespace, key, old, new)
2792 2800 def runhook():
2793 2801 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2794 2802 ret=ret)
2795 2803 self._afterlock(runhook)
2796 2804 return ret
2797 2805
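The prepushkey hook fired above receives namespace, key, old and new as keyword arguments. As an illustration only (module path and policy are made up; for controlling hooks a truthy return value aborts the operation), a Python hook consuming them might look like:

    # nobookmarkdelete.py - refuse bookmark deletion via pushkey
    # enabled with:
    #   [hooks]
    #   prepushkey.nobmdelete = python:/path/to/nobookmarkdelete.py:hook

    def hook(ui, repo, namespace=None, key=None, old=None, new=None, **kwargs):
        if namespace == 'bookmarks' and not new:
            ui.warn('refusing to delete bookmark %s\n' % key)
            return True  # truthy result aborts the pushkey
        return False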
2798 2806 def listkeys(self, namespace):
2799 2807 self.hook('prelistkeys', throw=True, namespace=namespace)
2800 2808 self.ui.debug('listing keys for "%s"\n' % namespace)
2801 2809 values = pushkey.list(self, namespace)
2802 2810 self.hook('listkeys', namespace=namespace, values=values)
2803 2811 return values
2804 2812
2805 2813 def debugwireargs(self, one, two, three=None, four=None, five=None):
2806 2814 '''used to test argument passing over the wire'''
2807 2815 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
2808 2816 pycompat.bytestr(four),
2809 2817 pycompat.bytestr(five))
2810 2818
2811 2819 def savecommitmessage(self, text):
2812 2820 fp = self.vfs('last-message.txt', 'wb')
2813 2821 try:
2814 2822 fp.write(text)
2815 2823 finally:
2816 2824 fp.close()
2817 2825 return self.pathto(fp.name[len(self.root) + 1:])
2818 2826
2819 2827 # used to avoid circular references so destructors work
2820 2828 def aftertrans(files):
2821 2829 renamefiles = [tuple(t) for t in files]
2822 2830 def a():
2823 2831 for vfs, src, dest in renamefiles:
2824 2832 # if src and dest refer to the same file, vfs.rename is a no-op,
2825 2833 # leaving both src and dest on disk. delete dest to make sure
2826 2834 # the rename couldn't be such a no-op.
2827 2835 vfs.tryunlink(dest)
2828 2836 try:
2829 2837 vfs.rename(src, dest)
2830 2838 except OSError: # journal file does not yet exist
2831 2839 pass
2832 2840 return a
2833 2841
2834 2842 def undoname(fn):
2835 2843 base, name = os.path.split(fn)
2836 2844 assert name.startswith('journal')
2837 2845 return os.path.join(base, name.replace('journal', 'undo', 1))
2838 2846
2839 2847 def instance(ui, path, create, intents=None, createopts=None):
2840 2848 localpath = util.urllocalpath(path)
2841 2849 if create:
2842 2850 createrepository(ui, localpath, createopts=createopts)
2843 2851
2844 2852 return makelocalrepository(ui, localpath, intents=intents)
2845 2853
2846 2854 def islocal(path):
2847 2855 return True
2848 2856
2849 2857 def defaultcreateopts(ui, createopts=None):
2850 2858 """Populate the default creation options for a repository.
2851 2859
2852 2860 A dictionary of explicitly requested creation options can be passed
2853 2861 in. Missing keys will be populated.
2854 2862 """
2855 2863 createopts = dict(createopts or {})
2856 2864
2857 2865 if 'backend' not in createopts:
2858 2866 # experimental config: storage.new-repo-backend
2859 2867 createopts['backend'] = ui.config('storage', 'new-repo-backend')
2860 2868
2861 2869 return createopts
2862 2870
2863 2871 def newreporequirements(ui, createopts):
2864 2872 """Determine the set of requirements for a new local repository.
2865 2873
2866 2874 Extensions can wrap this function to specify custom requirements for
2867 2875 new repositories.
2868 2876 """
2869 2877 # If the repo is being created from a shared repository, we copy
2870 2878 # its requirements.
2871 2879 if 'sharedrepo' in createopts:
2872 2880 requirements = set(createopts['sharedrepo'].requirements)
2873 2881 if createopts.get('sharedrelative'):
2874 2882 requirements.add('relshared')
2875 2883 else:
2876 2884 requirements.add('shared')
2877 2885
2878 2886 return requirements
2879 2887
2880 2888 if 'backend' not in createopts:
2881 2889 raise error.ProgrammingError('backend key not present in createopts; '
2882 2890 'was defaultcreateopts() called?')
2883 2891
2884 2892 if createopts['backend'] != 'revlogv1':
2885 2893 raise error.Abort(_('unable to determine repository requirements for '
2886 2894 'storage backend: %s') % createopts['backend'])
2887 2895
2888 2896 requirements = {'revlogv1'}
2889 2897 if ui.configbool('format', 'usestore'):
2890 2898 requirements.add('store')
2891 2899 if ui.configbool('format', 'usefncache'):
2892 2900 requirements.add('fncache')
2893 2901 if ui.configbool('format', 'dotencode'):
2894 2902 requirements.add('dotencode')
2895 2903
2896 2904 compengine = ui.config('experimental', 'format.compression')
2897 2905 if compengine not in util.compengines:
2898 2906 raise error.Abort(_('compression engine %s defined by '
2899 2907 'experimental.format.compression not available') %
2900 2908 compengine,
2901 2909 hint=_('run "hg debuginstall" to list available '
2902 2910 'compression engines'))
2903 2911
2904 2912 # zlib is the historical default and doesn't need an explicit requirement.
2905 2913 if compengine != 'zlib':
2906 2914 requirements.add('exp-compression-%s' % compengine)
2907 2915
2908 2916 if scmutil.gdinitconfig(ui):
2909 2917 requirements.add('generaldelta')
2910 2918 # experimental config: format.sparse-revlog
2911 2919 if ui.configbool('format', 'sparse-revlog'):
2912 2920 requirements.add(SPARSEREVLOG_REQUIREMENT)
2913 2921 if ui.configbool('experimental', 'treemanifest'):
2914 2922 requirements.add('treemanifest')
2915 2923
2916 2924 revlogv2 = ui.config('experimental', 'revlogv2')
2917 2925 if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
2918 2926 requirements.remove('revlogv1')
2919 2927 # generaldelta is implied by revlogv2.
2920 2928 requirements.discard('generaldelta')
2921 2929 requirements.add(REVLOGV2_REQUIREMENT)
2922 2930 # experimental config: format.internal-phase
2923 2931 if ui.configbool('format', 'internal-phase'):
2924 2932 requirements.add('internal-phase')
2925 2933
2926 2934 if createopts.get('narrowfiles'):
2927 2935 requirements.add(repository.NARROW_REQUIREMENT)
2928 2936
2929 2937 if createopts.get('lfs'):
2930 2938 requirements.add('lfs')
2931 2939
2932 2940 return requirements
2933 2941
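Since the docstring invites extensions to wrap this function, a minimal sketch of such a wrapper (extension and requirement names are hypothetical):

    # myext.py
    from mercurial import extensions, localrepo

    def _newreporequirements(orig, ui, createopts):
        requirements = orig(ui, createopts)
        # repos created while this extension is loaded carry an extra
        # requirement, locking out clients that don't provide it
        requirements.add('exp-myext-feature')
        return requirements

    def extsetup(ui):
        extensions.wrapfunction(localrepo, 'newreporequirements',
                                _newreporequirements)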
2934 2942 def filterknowncreateopts(ui, createopts):
2935 2943 """Filters a dict of repo creation options against options that are known.
2936 2944
2937 2945 Receives a dict of repo creation options and returns a dict of those
2938 2946 options that we don't know how to handle.
2939 2947
2940 2948 This function is called as part of repository creation. If the
2941 2949 returned dict contains any items, repository creation will not
2942 2950 be allowed, as it means there was a request to create a repository
2943 2951 with options not recognized by loaded code.
2944 2952
2945 2953 Extensions can wrap this function to filter out creation options
2946 2954 they know how to handle.
2947 2955 """
2948 2956 known = {
2949 2957 'backend',
2950 2958 'lfs',
2951 2959 'narrowfiles',
2952 2960 'sharedrepo',
2953 2961 'sharedrelative',
2954 2962 'shareditems',
2955 2963 'shallowfilestore',
2956 2964 }
2957 2965
2958 2966 return {k: v for k, v in createopts.items() if k not in known}
2959 2967
2960 2968 def createrepository(ui, path, createopts=None):
2961 2969 """Create a new repository in a vfs.
2962 2970
2963 2971 ``path`` path to the new repo's working directory.
2964 2972 ``createopts`` options for the new repository.
2965 2973
2966 2974 The following keys for ``createopts`` are recognized:
2967 2975
2968 2976 backend
2969 2977 The storage backend to use.
2970 2978 lfs
2971 2979 Repository will be created with ``lfs`` requirement. The lfs extension
2972 2980 will automatically be loaded when the repository is accessed.
2973 2981 narrowfiles
2974 2982 Set up repository to support narrow file storage.
2975 2983 sharedrepo
2976 2984 Repository object from which storage should be shared.
2977 2985 sharedrelative
2978 2986 Boolean indicating if the path to the shared repo should be
2979 2987 stored as relative. By default, the pointer to the "parent" repo
2980 2988 is stored as an absolute path.
2981 2989 shareditems
2982 2990 Set of items to share to the new repository (in addition to storage).
2983 2991 shallowfilestore
2984 2992 Indicates that storage for files should be shallow (not all ancestor
2985 2993 revisions are known).
2986 2994 """
2987 2995 createopts = defaultcreateopts(ui, createopts=createopts)
2988 2996
2989 2997 unknownopts = filterknowncreateopts(ui, createopts)
2990 2998
2991 2999 if not isinstance(unknownopts, dict):
2992 3000 raise error.ProgrammingError('filterknowncreateopts() did not return '
2993 3001 'a dict')
2994 3002
2995 3003 if unknownopts:
2996 3004 raise error.Abort(_('unable to create repository because of unknown '
2997 3005 'creation option: %s') %
2998 3006 ', '.join(sorted(unknownopts)),
2999 3007 hint=_('is a required extension not loaded?'))
3000 3008
3001 3009 requirements = newreporequirements(ui, createopts=createopts)
3002 3010
3003 3011 wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
3004 3012
3005 3013 hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
3006 3014 if hgvfs.exists():
3007 3015 raise error.RepoError(_('repository %s already exists') % path)
3008 3016
3009 3017 if 'sharedrepo' in createopts:
3010 3018 sharedpath = createopts['sharedrepo'].sharedpath
3011 3019
3012 3020 if createopts.get('sharedrelative'):
3013 3021 try:
3014 3022 sharedpath = os.path.relpath(sharedpath, hgvfs.base)
3015 3023 except (IOError, ValueError) as e:
3016 3024 # ValueError is raised on Windows if the drive letters differ
3017 3025 # on each path.
3018 3026 raise error.Abort(_('cannot calculate relative path'),
3019 3027 hint=stringutil.forcebytestr(e))
3020 3028
3021 3029 if not wdirvfs.exists():
3022 3030 wdirvfs.makedirs()
3023 3031
3024 3032 hgvfs.makedir(notindexed=True)
3025 3033 if 'sharedrepo' not in createopts:
3026 3034 hgvfs.mkdir(b'cache')
3027 3035 hgvfs.mkdir(b'wcache')
3028 3036
3029 3037 if b'store' in requirements and 'sharedrepo' not in createopts:
3030 3038 hgvfs.mkdir(b'store')
3031 3039
3032 3040 # We create an invalid changelog outside the store so very old
3033 3041 # Mercurial versions (which didn't know about the requirements
3034 3042 # file) encounter an error on reading the changelog. This
3035 3043 # effectively locks out old clients and prevents them from
3036 3044 # mucking with a repo in an unknown format.
3037 3045 #
3038 3046 # The revlog header has version 2, which won't be recognized by
3039 3047 # such old clients.
3040 3048 hgvfs.append(b'00changelog.i',
3041 3049 b'\0\0\0\2 dummy changelog to prevent using the old repo '
3042 3050 b'layout')
3043 3051
3044 3052 scmutil.writerequires(hgvfs, requirements)
3045 3053
3046 3054 # Write out file telling readers where to find the shared store.
3047 3055 if 'sharedrepo' in createopts:
3048 3056 hgvfs.write(b'sharedpath', sharedpath)
3049 3057
3050 3058 if createopts.get('shareditems'):
3051 3059 shared = b'\n'.join(sorted(createopts['shareditems'])) + b'\n'
3052 3060 hgvfs.write(b'shared', shared)
3053 3061
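For illustration, a direct call exercising a couple of the documented createopts keys (target path hypothetical; keys not passed are filled in by defaultcreateopts() inside createrepository()):

    from mercurial import localrepo, ui as uimod

    ui = uimod.ui.load()
    localrepo.createrepository(ui, b'/tmp/newrepo',
                               createopts={'narrowfiles': True,
                                           'lfs': True})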
3054 3062 def poisonrepository(repo):
3055 3063 """Poison a repository instance so it can no longer be used."""
3056 3064 # Perform any cleanup on the instance.
3057 3065 repo.close()
3058 3066
3059 3067 # Our strategy is to replace the type of the object with one that
3060 3068 # has all attribute lookups result in error.
3061 3069 #
3062 3070 # But we have to allow the close() method because some constructors
3063 3071 # of repos call close() on repo references.
3064 3072 class poisonedrepository(object):
3065 3073 def __getattribute__(self, item):
3066 3074 if item == r'close':
3067 3075 return object.__getattribute__(self, item)
3068 3076
3069 3077 raise error.ProgrammingError('repo instances should not be used '
3070 3078 'after unshare')
3071 3079
3072 3080 def close(self):
3073 3081 pass
3074 3082
3075 3083 # We may have a repoview, which intercepts __setattr__. So be sure
3076 3084 # we operate at the lowest level possible.
3077 3085 object.__setattr__(repo, r'__class__', poisonedrepository)
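The __class__ swap used above is plain Python and worth seeing in isolation; a self-contained sketch of the same poisoning technique:

    class _Poisoned(object):
        def __getattribute__(self, item):
            raise RuntimeError('object used after poisoning')

    def poison(obj):
        # swapping __class__ reroutes every attribute lookup, even on
        # instances whose normal __setattr__ is intercepted elsewhere
        object.__setattr__(obj, '__class__', _Poisoned)

    class Thing(object):
        pass

    t = Thing()
    poison(t)
    # any attribute access on t now raises RuntimeError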
@@ -1,228 +1,298 b''
1 1 # narrowspec.py - methods for working with a narrow view of a repository
2 2 #
3 3 # Copyright 2017 Google, Inc.
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11
12 12 from .i18n import _
13 13 from . import (
14 14 error,
15 15 match as matchmod,
16 merge,
16 17 repository,
17 18 sparse,
18 19 util,
19 20 )
20 21
22 # The file in .hg/store/ that indicates which paths exist in the store
21 23 FILENAME = 'narrowspec'
24 # The file in .hg/ that indicates which paths exist in the dirstate
25 DIRSTATE_FILENAME = 'narrowspec.dirstate'
22 26
23 27 # Pattern prefixes that are allowed in narrow patterns. This list MUST
24 28 # only contain patterns that are fast and safe to evaluate. Keep in mind
25 29 # that patterns are supplied by clients and executed on remote servers
26 30 # as part of wire protocol commands. That means that changes to this
27 31 # data structure influence the wire protocol and should not be taken
28 32 # lightly - especially removals.
29 33 VALID_PREFIXES = (
30 34 b'path:',
31 35 b'rootfilesin:',
32 36 )
33 37
34 38 def normalizesplitpattern(kind, pat):
35 39 """Returns the normalized version of a pattern and kind.
36 40
37 41 Returns a tuple with the normalized kind and normalized pattern.
38 42 """
39 43 pat = pat.rstrip('/')
40 44 _validatepattern(pat)
41 45 return kind, pat
42 46
43 47 def _numlines(s):
44 48 """Returns the number of lines in s, including ending empty lines."""
45 49 # We use splitlines because it is Unicode-friendly and thus Python 3
46 50 # compatible. However, it does not count empty lines at the end, so trick
47 51 # it by adding a character at the end.
48 52 return len((s + 'x').splitlines())
49 53
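A doctest-style look at the sentinel trick used above; easy to verify in any Python shell:

    >>> 'a\n\n'.splitlines()          # the final empty line is dropped
    ['a', '']
    >>> ('a\n\n' + 'x').splitlines()  # the sentinel keeps it countable
    ['a', '', 'x']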
50 54 def _validatepattern(pat):
51 55 """Validates the pattern and aborts if it is invalid.
52 56
53 57 Patterns are stored in the narrowspec as newline-separated
54 58 POSIX-style bytestring paths. There's no escaping.
55 59 """
56 60
57 61 # We use newlines as separators in the narrowspec file, so don't allow them
58 62 # in patterns.
59 63 if _numlines(pat) > 1:
60 64 raise error.Abort(_('newlines are not allowed in narrowspec paths'))
61 65
62 66 components = pat.split('/')
63 67 if '.' in components or '..' in components:
64 68 raise error.Abort(_('"." and ".." are not allowed in narrowspec paths'))
65 69
66 70 def normalizepattern(pattern, defaultkind='path'):
67 71 """Returns the normalized version of a text-format pattern.
68 72
69 73 If the pattern has no kind, the default will be added.
70 74 """
71 75 kind, pat = matchmod._patsplit(pattern, defaultkind)
72 76 return '%s:%s' % normalizesplitpattern(kind, pat)
73 77
74 78 def parsepatterns(pats):
75 79 """Parses an iterable of patterns into a typed pattern set.
76 80
77 81 Patterns are assumed to be ``path:`` if no prefix is present.
78 82 For safety and performance reasons, only some prefixes are allowed.
79 83 See ``validatepatterns()``.
80 84
81 85 This function should be used on patterns that come from the user to
82 86 normalize and validate them to the internal data structure used for
83 87 representing patterns.
84 88 """
85 89 res = {normalizepattern(orig) for orig in pats}
86 90 validatepatterns(res)
87 91 return res
88 92
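For reference, the normalization this performs, shown doctest-style (the default 'path:' kind is added and trailing slashes are stripped; output sorted since the result is a set):

    >>> sorted(parsepatterns([b'foo/bar/', b'rootfilesin:baz']))
    ['path:foo/bar', 'rootfilesin:baz']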
89 93 def validatepatterns(pats):
90 94 """Validate that patterns are in the expected data structure and format.
91 95
92 96 And that is a set of normalized patterns beginning with ``path:`` or
93 97 ``rootfilesin:``.
94 98
95 99 This function should be used to validate internal data structures
96 100 and patterns that are loaded from sources that use the internal,
97 101 prefixed pattern representation (but can't necessarily be fully trusted).
98 102 """
99 103 if not isinstance(pats, set):
100 104 raise error.ProgrammingError('narrow patterns should be a set; '
101 105 'got %r' % pats)
102 106
103 107 for pat in pats:
104 108 if not pat.startswith(VALID_PREFIXES):
105 109 # Use a Mercurial exception because this can happen due to user
106 110 # bugs (e.g. manually updating spec file).
107 111 raise error.Abort(_('invalid prefix on narrow pattern: %s') % pat,
108 112 hint=_('narrow patterns must begin with one of '
109 113 'the following: %s') %
110 114 ', '.join(VALID_PREFIXES))
111 115
112 116 def format(includes, excludes):
113 117 output = '[include]\n'
114 118 for i in sorted(includes - excludes):
115 119 output += i + '\n'
116 120 output += '[exclude]\n'
117 121 for e in sorted(excludes):
118 122 output += e + '\n'
119 123 return output
120 124
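An example of the serialized form this produces; note that an include that also appears as an exclude is dropped from the [include] section:

    >>> format({'path:foo', 'path:foo/bar'}, {'path:foo/bar'})
    '[include]\npath:foo\n[exclude]\npath:foo/bar\n'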
121 125 def match(root, include=None, exclude=None):
122 126 if not include:
123 127 # Passing empty include and empty exclude to matchmod.match()
124 128 # gives a matcher that matches everything, so explicitly use
125 129 # the nevermatcher.
126 130 return matchmod.never(root, '')
127 131 return matchmod.match(root, '', [], include=include or [],
128 132 exclude=exclude or [])
129 133
130 134 def parseconfig(ui, spec):
131 135 # maybe we should care about the profiles returned too
132 136 includepats, excludepats, profiles = sparse.parseconfig(ui, spec, 'narrow')
133 137 if profiles:
134 138 raise error.Abort(_("including other spec files using '%include' is not"
135 139 " supported in narrowspec"))
136 140
137 141 validatepatterns(includepats)
138 142 validatepatterns(excludepats)
139 143
140 144 return includepats, excludepats
141 145
142 146 def load(repo):
143 147 try:
144 148 spec = repo.svfs.read(FILENAME)
145 149 except IOError as e:
146 150 # Treat "narrowspec does not exist" the same as "narrowspec file exists
147 151 # and is empty".
148 152 if e.errno == errno.ENOENT:
149 153 return set(), set()
150 154 raise
151 155
152 156 return parseconfig(repo.ui, spec)
153 157
154 158 def save(repo, includepats, excludepats):
155 159 validatepatterns(includepats)
156 160 validatepatterns(excludepats)
157 161 spec = format(includepats, excludepats)
158 162 repo.svfs.write(FILENAME, spec)
159 163
164 def copytoworkingcopy(repo, tr):
165 if tr:
166 def write(file):
167 spec = repo.svfs.read(FILENAME)
168 file.write(spec)
169 file.close()
170 tr.addfilegenerator('narrowspec', (DIRSTATE_FILENAME,), write,
171 location='plain')
172 else:
173 spec = repo.svfs.read(FILENAME)
174 repo.vfs.write(DIRSTATE_FILENAME, spec)
175
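Writing the working-copy spec through tr.addfilegenerator() means the .hg/ copy only materializes if the transaction commits, and location='plain' targets .hg/ rather than .hg/store/. A sketch of the same pattern for an arbitrary extension-owned file (file name hypothetical):

    def writestate(repo, tr):
        def write(fp):
            fp.write(b'state computed inside the transaction\n')
            fp.close()
        tr.addfilegenerator('myext-state', ('myext-state',), write,
                            location='plain')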
160 176 def savebackup(repo, backupname):
161 177 if repository.NARROW_REQUIREMENT not in repo.requirements:
162 178 return
163 179 svfs = repo.svfs
164 180 svfs.tryunlink(backupname)
165 181 util.copyfile(svfs.join(FILENAME), svfs.join(backupname), hardlink=True)
166 182
167 183 def restorebackup(repo, backupname):
168 184 if repository.NARROW_REQUIREMENT not in repo.requirements:
169 185 return
170 186 util.rename(repo.svfs.join(backupname), repo.svfs.join(FILENAME))
171 187
172 188 def clearbackup(repo, backupname):
173 189 if repository.NARROW_REQUIREMENT not in repo.requirements:
174 190 return
175 191 repo.svfs.unlink(backupname)
176 192
177 193 def restrictpatterns(req_includes, req_excludes, repo_includes, repo_excludes):
178 194 r""" Restricts the patterns according to repo settings,
179 195 results in a logical AND operation
180 196
181 197 :param req_includes: requested includes
182 198 :param req_excludes: requested excludes
183 199 :param repo_includes: repo includes
184 200 :param repo_excludes: repo excludes
185 201 :return: include patterns, exclude patterns, and invalid include patterns.
186 202
187 203 >>> restrictpatterns({'f1','f2'}, {}, ['f1'], [])
188 204 (set(['f1']), {}, [])
189 205 >>> restrictpatterns({'f1'}, {}, ['f1','f2'], [])
190 206 (set(['f1']), {}, [])
191 207 >>> restrictpatterns({'f1/fc1', 'f3/fc3'}, {}, ['f1','f2'], [])
192 208 (set(['f1/fc1']), {}, [])
193 209 >>> restrictpatterns({'f1_fc1'}, {}, ['f1','f2'], [])
194 210 ([], set(['path:.']), [])
195 211 >>> restrictpatterns({'f1/../f2/fc2'}, {}, ['f1','f2'], [])
196 212 (set(['f2/fc2']), {}, [])
197 213 >>> restrictpatterns({'f1/../f3/fc3'}, {}, ['f1','f2'], [])
198 214 ([], set(['path:.']), [])
199 215 >>> restrictpatterns({'f1/$non_existent_var'}, {}, ['f1','f2'], [])
200 216 (set(['f1/$non_existent_var']), {}, [])
201 217 """
202 218 res_excludes = set(req_excludes)
203 219 res_excludes.update(repo_excludes)
204 220 invalid_includes = []
205 221 if not req_includes:
206 222 res_includes = set(repo_includes)
207 223 elif 'path:.' not in repo_includes:
208 224 res_includes = []
209 225 for req_include in req_includes:
210 226 req_include = util.expandpath(util.normpath(req_include))
211 227 if req_include in repo_includes:
212 228 res_includes.append(req_include)
213 229 continue
214 230 valid = False
215 231 for repo_include in repo_includes:
216 232 if req_include.startswith(repo_include + '/'):
217 233 valid = True
218 234 res_includes.append(req_include)
219 235 break
220 236 if not valid:
221 237 invalid_includes.append(req_include)
222 238 if len(res_includes) == 0:
223 239 res_excludes = {'path:.'}
224 240 else:
225 241 res_includes = set(res_includes)
226 242 else:
227 243 res_includes = set(req_includes)
228 244 return res_includes, res_excludes, invalid_includes
245
246 # These two are extracted for extensions (specifically for Google's CitC file
247 # system)
248 def _deletecleanfiles(repo, files):
249 for f in files:
250 repo.wvfs.unlinkpath(f)
251
252 def _writeaddedfiles(repo, pctx, files):
253 actions = merge.emptyactions()
254 addgaction = actions['g'].append
255 mf = repo['.'].manifest()
256 for f in files:
257 if not repo.wvfs.exists(f):
258 addgaction((f, (mf.flags(f), False), "narrowspec updated"))
259 merge.applyupdates(repo, actions, wctx=repo[None],
260 mctx=repo['.'], overwrite=False)
261
262 def checkworkingcopynarrowspec(repo):
263 storespec = repo.svfs.tryread(FILENAME)
264 wcspec = repo.vfs.tryread(DIRSTATE_FILENAME)
265 if wcspec != storespec:
266 raise error.Abort(_("working copy's narrowspec is stale"),
267 hint=_("run 'hg tracked --update-working-copy'"))
268
269 def updateworkingcopy(repo, tr):
270 oldspec = repo.vfs.tryread(DIRSTATE_FILENAME)
271 newspec = repo.svfs.tryread(FILENAME)
272
273 oldincludes, oldexcludes = parseconfig(repo.ui, oldspec)
274 newincludes, newexcludes = parseconfig(repo.ui, newspec)
275 oldmatch = match(repo.root, include=oldincludes, exclude=oldexcludes)
276 newmatch = match(repo.root, include=newincludes, exclude=newexcludes)
277 addedmatch = matchmod.differencematcher(newmatch, oldmatch)
278 removedmatch = matchmod.differencematcher(oldmatch, newmatch)
279
280 ds = repo.dirstate
281 lookup, status = ds.status(removedmatch, subrepos=[], ignored=False,
282 clean=True, unknown=False)
283 _deletecleanfiles(repo, status.clean)
284 trackeddirty = lookup + status.modified + status.added
285 for f in sorted(trackeddirty):
286 repo.ui.status(_('not deleting possibly dirty file %s\n') % f)
287 for f in status.clean + trackeddirty:
288 ds.drop(f)
289
290 repo.narrowpats = newincludes, newexcludes
291 repo._narrowmatch = newmatch
292 pctx = repo['.']
293 newfiles = [f for f in pctx.manifest().walk(addedmatch) if f not in ds]
294 for f in newfiles:
295 ds.normallookup(f)
296 _writeaddedfiles(repo, pctx, newfiles)
297
298 ds.write(tr)
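The heart of updateworkingcopy() is deriving which paths entered and left the narrowspec as a pair of difference matchers; a sketch of just that step, reusing this module's own match() helper:

    from mercurial import match as matchmod

    def changedmatchers(root, oldinc, oldexc, newinc, newexc):
        oldmatch = match(root, include=oldinc, exclude=oldexc)
        newmatch = match(root, include=newinc, exclude=newexc)
        # paths newly covered by the spec, and paths no longer covered
        added = matchmod.differencematcher(newmatch, oldmatch)
        removed = matchmod.differencematcher(oldmatch, newmatch)
        return added, removed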
@@ -1,43 +1,44 b''
1 1 $ . "$TESTDIR/narrow-library.sh"
2 2 $ hg init repo
3 3 $ cd repo
4 4 $ cat << EOF > .hg/store/narrowspec
5 5 > [include]
6 6 > path:foo
7 7 > [exclude]
8 8 > EOF
9 $ cp .hg/store/narrowspec .hg/narrowspec.dirstate
9 10 $ echo treemanifest >> .hg/requires
10 11 $ echo narrowhg-experimental >> .hg/requires
11 12 $ mkdir -p foo/bar
12 13 $ echo b > foo/f
13 14 $ echo c > foo/bar/f
14 15 $ hg commit -Am hi
15 16 adding foo/bar/f
16 17 adding foo/f
17 18 $ hg debugindex -m
18 19 rev linkrev nodeid p1 p2
19 20 0 0 14a5d056d75a 000000000000 000000000000
20 21 $ hg debugindex --dir foo
21 22 rev linkrev nodeid p1 p2
22 23 0 0 e635c7857aef 000000000000 000000000000
23 24 $ hg debugindex --dir foo/
24 25 rev linkrev nodeid p1 p2
25 26 0 0 e635c7857aef 000000000000 000000000000
26 27 $ hg debugindex --dir foo/bar
27 28 rev linkrev nodeid p1 p2
28 29 0 0 e091d4224761 000000000000 000000000000
29 30 $ hg debugindex --dir foo/bar/
30 31 rev linkrev nodeid p1 p2
31 32 0 0 e091d4224761 000000000000 000000000000
32 33 $ hg debugdata -m 0
33 34 foo\x00e635c7857aef92ac761ce5741a99da159abbbb24t (esc)
34 35 $ hg debugdata --dir foo 0
35 36 bar\x00e091d42247613adff5d41b67f15fe7189ee97b39t (esc)
36 37 f\x001e88685f5ddec574a34c70af492f95b6debc8741 (esc)
37 38 $ hg debugdata --dir foo/ 0
38 39 bar\x00e091d42247613adff5d41b67f15fe7189ee97b39t (esc)
39 40 f\x001e88685f5ddec574a34c70af492f95b6debc8741 (esc)
40 41 $ hg debugdata --dir foo/bar 0
41 42 f\x00149da44f2a4e14f488b7bd4157945a9837408c00 (esc)
42 43 $ hg debugdata --dir foo/bar/ 0
43 44 f\x00149da44f2a4e14f488b7bd4157945a9837408c00 (esc)
@@ -1,132 +1,170 b''
1 1 #testcases flat tree
2 2
3 3 $ . "$TESTDIR/narrow-library.sh"
4 4
5 5 #if tree
6 6 $ cat << EOF >> $HGRCPATH
7 7 > [experimental]
8 8 > treemanifest = 1
9 9 > EOF
10 10 #endif
11 11
12 12 $ cat << EOF >> $HGRCPATH
13 13 > [extensions]
14 14 > share =
15 15 > EOF
16 16
17 17 $ hg init remote
18 18 $ cd remote
19 19 $ for x in `$TESTDIR/seq.py 0 10`
20 20 > do
21 21 > mkdir d$x
22 22 > echo $x > d$x/f
23 23 > hg add d$x/f
24 24 > hg commit -m "add d$x/f"
25 25 > done
26 26 $ cd ..
27 27
28 28 $ hg clone --narrow ssh://user@dummy/remote main -q \
29 29 > --include d1 --include d3 --include d5 --include d7
30 30
31 31 $ hg share main share
32 32 updating working directory
33 33 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
34 34 $ hg -R share tracked
35 35 I path:d1
36 36 I path:d3
37 37 I path:d5
38 38 I path:d7
39 39 $ hg -R share files
40 40 share/d1/f
41 41 share/d3/f
42 42 share/d5/f
43 43 share/d7/f
44 44
45 45 Narrow the share and check that the main repo's working copy gets updated
46 46
47 47 # Make d3/f dirty
48 48 $ echo x >> main/d3/f
49 49 $ echo y >> main/d3/g
50 50 $ hg add main/d3/g
51 51 $ hg -R main st
52 52 M d3/f
53 53 A d3/g
54 54 # Make d5/f not match the dirstate timestamp even though it's clean
55 55 $ sleep 2
56 56 $ hg -R main st
57 57 M d3/f
58 58 A d3/g
59 59 $ hg -R main debugdirstate --no-dates
60 60 n 644 2 set d1/f
61 61 n 644 2 unset d3/f
62 62 a 0 -1 unset d3/g
63 63 n 644 2 set d5/f
64 64 n 644 2 set d7/f
65 65 $ touch main/d5/f
66 66 $ hg -R share tracked --removeinclude d1 --removeinclude d3 --removeinclude d5
67 67 comparing with ssh://user@dummy/remote
68 68 searching for changes
69 69 looking for local changes to affected paths
70 70 deleting data/d1/f.i
71 71 deleting data/d3/f.i
72 72 deleting data/d5/f.i
73 73 deleting meta/d1/00manifest.i (tree !)
74 74 deleting meta/d3/00manifest.i (tree !)
75 75 deleting meta/d5/00manifest.i (tree !)
76 76 $ hg -R main tracked
77 77 I path:d7
78 $ hg -R main files
79 abort: working copy's narrowspec is stale
80 (run 'hg tracked --update-working-copy')
81 [255]
82 $ hg -R main tracked --update-working-copy
83 not deleting possibly dirty file d3/f
84 not deleting possibly dirty file d3/g
85 not deleting possibly dirty file d5/f
78 86 # d1/f, d3/f, d3/g and d5/f should no longer be reported
79 87 $ hg -R main files
80 88 main/d7/f
81 89 # d1/f should no longer be there, d3/f should be since it was dirty, d3/g should be there since
82 90 # it was added, and d5/f should be since we couldn't be sure it was clean
83 91 $ find main/d* -type f | sort
84 main/d1/f
85 92 main/d3/f
86 93 main/d3/g
87 94 main/d5/f
88 95 main/d7/f
89 96
90 97 Widen the share and check that the main repo's working copy gets updated
91 98
92 99 $ hg -R share tracked --addinclude d1 --addinclude d3 -q
93 100 $ hg -R share tracked
94 101 I path:d1
95 102 I path:d3
96 103 I path:d7
97 104 $ hg -R share files
98 105 share/d1/f
99 106 share/d3/f
100 107 share/d7/f
101 108 $ hg -R main tracked
102 109 I path:d1
103 110 I path:d3
104 111 I path:d7
112 $ hg -R main files
113 abort: working copy's narrowspec is stale
114 (run 'hg tracked --update-working-copy')
115 [255]
116 $ hg -R main tracked --update-working-copy
105 117 # d1/f, d3/f should be back
106 118 $ hg -R main files
107 119 main/d1/f
108 120 main/d3/f
109 main/d3/g
110 121 main/d7/f
111 122 # d3/f should be modified (not clobbered by the widening), and d3/g should be untracked
112 123 $ hg -R main st --all
113 124 M d3/f
114 A d3/g
125 ? d3/g
115 126 C d1/f
116 127 C d7/f
117 128
118 129 We should also be able to unshare without breaking everything:
119 130
120 131 $ hg share main share-unshare
121 132 updating working directory
122 133 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
123 134 $ cd share-unshare
124 135 $ hg unshare
125 136 $ hg verify
126 137 checking changesets
127 138 checking manifests
128 139 checking directory manifests (tree !)
129 140 crosschecking files in changesets and manifests
130 141 checking files
131 142 checked 11 changesets with 3 changes to 3 files
132 143 $ cd ..
144
145 Dirstate should be left alone when upgrading from a version of hg that didn't support narrow+share
146
147 $ hg share main share-upgrade
148 updating working directory
149 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
150 $ cd share-upgrade
151 $ echo x >> d1/f
152 $ echo y >> d3/g
153 $ hg add d3/g
154 $ hg rm d7/f
155 $ hg st
156 M d1/f
157 A d3/g
158 R d7/f
159 Make it look like a repo from before narrow+share was supported
160 $ rm .hg/narrowspec.dirstate
161 $ hg st
162 abort: working copy's narrowspec is stale
163 (run 'hg tracked --update-working-copy')
164 [255]
165 $ hg tracked --update-working-copy
166 $ hg st
167 M d1/f
168 A d3/g
169 R d7/f
170 $ cd ..
@@ -1,218 +1,220 b''
1 1 #testcases flat tree
2 2 $ . "$TESTDIR/narrow-library.sh"
3 3
4 4 #if tree
5 5 $ cat << EOF >> $HGRCPATH
6 6 > [experimental]
7 7 > treemanifest = 1
8 8 > EOF
9 9 #endif
10 10
11 11 $ hg init master
12 12 $ cd master
13 13 $ cat >> .hg/hgrc <<EOF
14 14 > [narrow]
15 15 > serveellipses=True
16 16 > EOF
17 17
18 18 $ mkdir inside
19 19 $ echo 'inside' > inside/f
20 20 $ hg add inside/f
21 21 $ hg commit -m 'add inside'
22 22
23 23 $ mkdir widest
24 24 $ echo 'widest' > widest/f
25 25 $ hg add widest/f
26 26 $ hg commit -m 'add widest'
27 27
28 28 $ mkdir outside
29 29 $ echo 'outside' > outside/f
30 30 $ hg add outside/f
31 31 $ hg commit -m 'add outside'
32 32
33 33 $ cd ..
34 34
35 35 narrow clone the inside file
36 36
37 37 $ hg clone --narrow ssh://user@dummy/master narrow --include inside
38 38 requesting all changes
39 39 adding changesets
40 40 adding manifests
41 41 adding file changes
42 42 added 2 changesets with 1 changes to 1 files
43 43 new changesets *:* (glob)
44 44 updating to branch default
45 45 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
46 46 $ cd narrow
47 47 $ hg tracked
48 48 I path:inside
49 49 $ ls
50 50 inside
51 51 $ cat inside/f
52 52 inside
53 53 $ cd ..
54 54
55 55 add more upstream files which we will include in a wider narrow spec
56 56
57 57 $ cd master
58 58
59 59 $ mkdir wider
60 60 $ echo 'wider' > wider/f
61 61 $ hg add wider/f
62 62 $ echo 'widest v2' > widest/f
63 63 $ hg commit -m 'add wider, update widest'
64 64
65 65 $ echo 'widest v3' > widest/f
66 66 $ hg commit -m 'update widest v3'
67 67
68 68 $ echo 'inside v2' > inside/f
69 69 $ hg commit -m 'update inside'
70 70
71 71 $ mkdir outside2
72 72 $ echo 'outside2' > outside2/f
73 73 $ hg add outside2/f
74 74 $ hg commit -m 'add outside2'
75 75
76 76 $ echo 'widest v4' > widest/f
77 77 $ hg commit -m 'update widest v4'
78 78
79 79 $ hg log -T "{if(ellipsis, '...')}{rev}: {desc}\n"
80 80 7: update widest v4
81 81 6: add outside2
82 82 5: update inside
83 83 4: update widest v3
84 84 3: add wider, update widest
85 85 2: add outside
86 86 1: add widest
87 87 0: add inside
88 88
89 89 $ cd ..
90 90
91 91 Testing the --import-rules flag of `hg tracked` command
92 92
93 93 $ cd narrow
94 94 $ hg tracked --import-rules
95 95 hg tracked: option --import-rules requires argument
96 96 hg tracked [OPTIONS]... [REMOTE]
97 97
98 98 show or change the current narrowspec
99 99
100 100 options ([+] can be repeated):
101 101
102 102 --addinclude VALUE [+] new paths to include
103 103 --removeinclude VALUE [+] old paths to no longer include
104 104 --addexclude VALUE [+] new paths to exclude
105 105 --import-rules VALUE import narrowspecs from a file
106 106 --removeexclude VALUE [+] old paths to no longer exclude
107 107 --clear whether to replace the existing narrowspec
108 108 --force-delete-local-changes forces deletion of local changes when
109 109 narrowing
110 --update-working-copy update working copy when the store has
111 changed
110 112 -e --ssh CMD specify ssh command to use
111 113 --remotecmd CMD specify hg command to run on the remote side
112 114 --insecure do not verify server certificate (ignoring
113 115 web.cacerts config)
114 116
115 117 (use 'hg tracked -h' to show more help)
116 118 [255]
117 119 $ hg tracked --import-rules doesnotexist
118 120 abort: cannot read narrowspecs from '$TESTTMP/narrow/doesnotexist': $ENOENT$
119 121 [255]
120 122
121 123 $ cat > specs <<EOF
122 124 > %include foo
123 125 > [include]
124 126 > path:widest/
125 127 > [exclude]
126 128 > path:inside/
127 129 > EOF
128 130
129 131 $ hg tracked --import-rules specs
130 132 abort: including other spec files using '%include' is not supported in narrowspec
131 133 [255]
132 134
133 135 $ cat > specs <<EOF
134 136 > [include]
135 137 > outisde
136 138 > [exclude]
137 139 > inside
138 140 > EOF
139 141
140 142 $ hg tracked --import-rules specs
141 143 comparing with ssh://user@dummy/master
142 144 searching for changes
143 145 looking for local changes to affected paths
144 146 deleting data/inside/f.i
145 147 deleting meta/inside/00manifest.i (tree !)
146 148 no changes found
147 149 saved backup bundle to $TESTTMP/narrow/.hg/strip-backup/*-widen.hg (glob)
148 150 adding changesets
149 151 adding manifests
150 152 adding file changes
151 153 added 2 changesets with 0 changes to 0 files
152 154 new changesets *:* (glob)
153 155 $ hg tracked
154 156 I path:outisde
155 157 X path:inside
156 158
157 159 Testing the --import-rules flag with --addinclude and --addexclude
158 160
159 161 $ cat > specs <<EOF
160 162 > [include]
161 163 > widest
162 164 > EOF
163 165
164 166 $ hg tracked --import-rules specs --addinclude 'wider/'
165 167 comparing with ssh://user@dummy/master
166 168 searching for changes
167 169 no changes found
168 170 saved backup bundle to $TESTTMP/narrow/.hg/strip-backup/*-widen.hg (glob)
169 171 adding changesets
170 172 adding manifests
171 173 adding file changes
172 174 added 3 changesets with 1 changes to 1 files
173 175 new changesets *:* (glob)
174 176 $ hg tracked
175 177 I path:outisde
176 178 I path:wider
177 179 I path:widest
178 180 X path:inside
179 181
180 182 $ cat > specs <<EOF
181 183 > [exclude]
182 184 > outside2
183 185 > EOF
184 186
185 187 $ hg tracked --import-rules specs --addexclude 'widest'
186 188 comparing with ssh://user@dummy/master
187 189 searching for changes
188 190 looking for local changes to affected paths
189 191 deleting data/widest/f.i
190 192 deleting meta/widest/00manifest.i (tree !)
191 193 $ hg tracked
192 194 I path:outisde
193 195 I path:wider
194 196 X path:inside
195 197 X path:outside2
196 198 X path:widest
197 199
198 200 $ hg tracked --import-rules specs --clear
199 201 abort: the --clear option is not yet supported
200 202 [255]
201 203
202 204 Testing with passing an out-of-wdir file
203 205
204 206 $ cat > ../nspecs <<EOF
205 207 > [include]
206 208 > widest
207 209 > EOF
208 210
209 211 $ hg tracked --import-rules ../nspecs
210 212 comparing with ssh://user@dummy/master
211 213 searching for changes
212 214 no changes found
213 215 saved backup bundle to $TESTTMP/narrow/.hg/strip-backup/*-widen.hg (glob)
214 216 adding changesets
215 217 adding manifests
216 218 adding file changes
217 219 added 3 changesets with 0 changes to 0 files
218 220 new changesets *:* (glob)