narrow: mark requirement as a constant...
Augie Fackler
r36105:8c31187b default
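The change itself is mechanical: the module-level requirement string defined in narrowrepo.py is renamed from the lowercase attribute requirement to the constant-style REQUIREMENT, and every call site that tests repo.requirements is updated to match. A minimal standalone sketch of that pattern (FakeRepo is a hypothetical stand-in, not Mercurial's repository type):

    REQUIREMENT = 'narrowhg'  # module-level constant, as in narrowrepo.py after this change

    class FakeRepo(object):
        """Hypothetical stand-in; only the .requirements set matters here."""
        def __init__(self, requirements=None):
            self.requirements = set(requirements or [])

    def isnarrow(repo):
        # the check every call site in this diff performs
        return REQUIREMENT in repo.requirements

    assert isnarrow(FakeRepo(['revlogv1', REQUIREMENT]))
    assert not isnarrow(FakeRepo(['revlogv1']))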
@@ -1,93 +1,93 @@
1 1 # __init__.py - narrowhg extension
2 2 #
3 3 # Copyright 2017 Google, Inc.
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 '''create clones which fetch history data for subset of files (EXPERIMENTAL)'''
8 8
9 9 from __future__ import absolute_import
10 10
11 11 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
12 12 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
13 13 # be specifying the version(s) of Mercurial they are tested with, or
14 14 # leave the attribute unspecified.
15 15 testedwith = 'ships-with-hg-core'
16 16
17 17 from mercurial import (
18 18 extensions,
19 19 hg,
20 20 localrepo,
21 21 registrar,
22 22 verify as verifymod,
23 23 )
24 24
25 25 from . import (
26 26 narrowbundle2,
27 27 narrowchangegroup,
28 28 narrowcommands,
29 29 narrowcopies,
30 30 narrowdirstate,
31 31 narrowmerge,
32 32 narrowpatch,
33 33 narrowrepo,
34 34 narrowrevlog,
35 35 narrowtemplates,
36 36 narrowwirepeer,
37 37 )
38 38
39 39 configtable = {}
40 40 configitem = registrar.configitem(configtable)
41 41 # Narrowhg *has* support for serving ellipsis nodes (which are used at
42 42 # least by Google's internal server), but that support is pretty
43 43 # fragile and has a lot of problems on real-world repositories that
44 44 # have complex graph topologies. This could probably be corrected, but
45 45 # absent someone needing the full support for ellipsis nodes in
46 46 # repositories with merges, it's unlikely this work will get done. As
47 47 # of this writing in late 2017, all repositories large enough for
48 48 # ellipsis nodes to be a hard requirement also enforce strictly linear
49 49 # history for other scaling reasons.
50 50 configitem('experimental', 'narrowservebrokenellipses',
51 51 default=False,
52 52 alias=[('narrow', 'serveellipses')],
53 53 )
54 54
55 55 # Export the commands table for Mercurial to see.
56 56 cmdtable = narrowcommands.table
57 57
58 localrepo.localrepository._basesupported.add(narrowrepo.requirement)
58 localrepo.localrepository._basesupported.add(narrowrepo.REQUIREMENT)
59 59
60 60 def uisetup(ui):
61 61 """Wraps user-facing mercurial commands with narrow-aware versions."""
62 62 narrowrevlog.setup()
63 63 narrowbundle2.setup()
64 64 narrowmerge.setup()
65 65 narrowtemplates.setup()
66 66 narrowcommands.setup()
67 67 narrowchangegroup.setup()
68 68 narrowwirepeer.uisetup()
69 69
70 70 def reposetup(ui, repo):
71 71 """Wraps local repositories with narrow repo support."""
72 72 if not isinstance(repo, localrepo.localrepository):
73 73 return
74 74
75 if narrowrepo.requirement in repo.requirements:
75 if narrowrepo.REQUIREMENT in repo.requirements:
76 76 narrowrepo.wraprepo(repo, True)
77 77 narrowcopies.setup(repo)
78 78 narrowdirstate.setup(repo)
79 79 narrowpatch.setup(repo)
80 80 narrowwirepeer.reposetup(repo)
81 81
82 82 def _verifierinit(orig, self, repo, matcher=None):
83 83 # The verifier's matcher argument was designed for narrowhg, so it should
84 84 # be None from core. If another extension passes a matcher (unlikely),
85 85 # we'll have to fail until matchers can be composed more easily.
86 86 assert matcher is None
87 87 matcher = getattr(repo, 'narrowmatch', lambda: None)()
88 88 orig(self, repo, matcher)
89 89
90 90 def extsetup(ui):
91 91 extensions.wrapfunction(verifymod.verifier, '__init__', _verifierinit)
92 92 extensions.wrapfunction(hg, 'postshare', narrowrepo.wrappostshare)
93 93 extensions.wrapfunction(hg, 'copystore', narrowrepo.unsharenarrowspec)
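For context on the _basesupported registration above: Mercurial refuses to open a repository whose .hg/requires file lists an entry the running client does not recognize, so the extension must declare the narrow requirement as supported before reposetup() can conditionally wrap the repo. A rough sketch of that gatekeeping, with made-up names rather than Mercurial's actual internals:

    SUPPORTED = {'revlogv1', 'store', 'fncache'}

    def checkrequirements(requirements, supported):
        # refuse to open repos needing features this client lacks
        missing = set(requirements) - set(supported)
        if missing:
            raise RuntimeError('repository requires unknown features: %s'
                               % ', '.join(sorted(missing)))

    try:
        checkrequirements({'revlogv1', 'narrowhg'}, SUPPORTED)
    except RuntimeError as err:
        print(err)  # rejected until 'narrowhg' is declared supported

    checkrequirements({'revlogv1', 'narrowhg'}, SUPPORTED | {'narrowhg'})  # accepted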
@@ -1,496 +1,496 @@
1 1 # narrowbundle2.py - bundle2 extensions for narrow repository support
2 2 #
3 3 # Copyright 2017 Google, Inc.
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 11 import errno
12 12 import struct
13 13
14 14 from mercurial.i18n import _
15 15 from mercurial.node import (
16 16 bin,
17 17 nullid,
18 18 nullrev,
19 19 )
20 20 from mercurial import (
21 21 bundle2,
22 22 changegroup,
23 23 dagutil,
24 24 error,
25 25 exchange,
26 26 extensions,
27 27 repair,
28 28 util,
29 29 wireproto,
30 30 )
31 31
32 32 from . import (
33 33 narrowrepo,
34 34 narrowspec,
35 35 )
36 36
37 37 NARROWCAP = 'narrow'
38 38 _NARROWACL_SECTION = 'narrowhgacl'
39 39 _CHANGESPECPART = NARROWCAP + ':changespec'
40 40 _SPECPART = NARROWCAP + ':spec'
41 41 _SPECPART_INCLUDE = 'include'
42 42 _SPECPART_EXCLUDE = 'exclude'
43 43 _KILLNODESIGNAL = 'KILL'
44 44 _DONESIGNAL = 'DONE'
45 45 _ELIDEDCSHEADER = '>20s20s20sl' # cset id, p1, p2, len(text)
46 46 _ELIDEDMFHEADER = '>20s20s20s20sl' # manifest id, p1, p2, link id, len(text)
47 47 _CSHEADERSIZE = struct.calcsize(_ELIDEDCSHEADER)
48 48 _MFHEADERSIZE = struct.calcsize(_ELIDEDMFHEADER)
49 49
50 50 # When advertising capabilities, always include narrow clone support.
51 51 def getrepocaps_narrow(orig, repo, **kwargs):
52 52 caps = orig(repo, **kwargs)
53 53 caps[NARROWCAP] = ['v0']
54 54 return caps
55 55
56 56 def _computeellipsis(repo, common, heads, known, match, depth=None):
57 57 """Compute the shape of a narrowed DAG.
58 58
59 59 Args:
60 60 repo: The repository we're transferring.
61 61 common: The roots of the DAG range we're transferring.
62 62 May be just [nullid], which means all ancestors of heads.
63 63 heads: The heads of the DAG range we're transferring.
64 64 match: The narrowmatcher that allows us to identify relevant changes.
65 65 depth: If not None, only consider nodes to be full nodes if they are at
66 66 most depth changesets away from one of heads.
67 67
68 68 Returns:
69 69 A tuple of (visitnodes, relevant_nodes, ellipsisroots) where:
70 70
71 71 visitnodes: The list of nodes (either full or ellipsis) which
72 72 need to be sent to the client.
73 73 relevant_nodes: The set of changelog nodes which change a file inside
74 74 the narrowspec. The client needs these as non-ellipsis nodes.
75 75 ellipsisroots: A dict of {rev: parents} that is used in
76 76 narrowchangegroup to produce ellipsis nodes with the
77 77 correct parents.
78 78 """
79 79 cl = repo.changelog
80 80 mfl = repo.manifestlog
81 81
82 82 cldag = dagutil.revlogdag(cl)
83 83 # dagutil does not like nullid/nullrev
84 84 commonrevs = cldag.internalizeall(common - set([nullid])) | set([nullrev])
85 85 headsrevs = cldag.internalizeall(heads)
86 86 if depth:
87 87 revdepth = {h: 0 for h in headsrevs}
88 88
89 89 ellipsisheads = collections.defaultdict(set)
90 90 ellipsisroots = collections.defaultdict(set)
91 91
92 92 def addroot(head, curchange):
93 93 """Add a root to an ellipsis head, splitting heads with 3 roots."""
94 94 ellipsisroots[head].add(curchange)
95 95 # Recursively split ellipsis heads with 3 roots by finding the
96 96 # roots' youngest common descendant which is an elided merge commit.
97 97 # That descendant takes 2 of the 3 roots as its own, and becomes a
98 98 # root of the head.
99 99 while len(ellipsisroots[head]) > 2:
100 100 child, roots = splithead(head)
101 101 splitroots(head, child, roots)
102 102 head = child # Recurse in case we just added a 3rd root
103 103
104 104 def splitroots(head, child, roots):
105 105 ellipsisroots[head].difference_update(roots)
106 106 ellipsisroots[head].add(child)
107 107 ellipsisroots[child].update(roots)
108 108 ellipsisroots[child].discard(child)
109 109
110 110 def splithead(head):
111 111 r1, r2, r3 = sorted(ellipsisroots[head])
112 112 for nr1, nr2 in ((r2, r3), (r1, r3), (r1, r2)):
113 113 mid = repo.revs('sort(merge() & %d::%d & %d::%d, -rev)',
114 114 nr1, head, nr2, head)
115 115 for j in mid:
116 116 if j == nr2:
117 117 return nr2, (nr1, nr2)
118 118 if j not in ellipsisroots or len(ellipsisroots[j]) < 2:
119 119 return j, (nr1, nr2)
120 120 raise error.Abort('Failed to split up ellipsis node! head: %d, '
121 121 'roots: %d %d %d' % (head, r1, r2, r3))
122 122
123 123 missing = list(cl.findmissingrevs(common=commonrevs, heads=headsrevs))
124 124 visit = reversed(missing)
125 125 relevant_nodes = set()
126 126 visitnodes = map(cl.node, missing)
127 127 required = set(headsrevs) | known
128 128 for rev in visit:
129 129 clrev = cl.changelogrevision(rev)
130 130 ps = cldag.parents(rev)
131 131 if depth is not None:
132 132 curdepth = revdepth[rev]
133 133 for p in ps:
134 134 revdepth[p] = min(curdepth + 1, revdepth.get(p, depth + 1))
135 135 needed = False
136 136 shallow_enough = depth is None or revdepth[rev] <= depth
137 137 if shallow_enough:
138 138 curmf = mfl[clrev.manifest].read()
139 139 if ps:
140 140 # We choose to not trust the changed files list in
141 141 # changesets because it's not always correct. TODO: could
142 142 # we trust it for the non-merge case?
143 143 p1mf = mfl[cl.changelogrevision(ps[0]).manifest].read()
144 144 needed = any(match(f) for f in curmf.diff(p1mf).iterkeys())
145 145 if not needed and len(ps) > 1:
146 146 # For merge changes, the list of changed files is not
147 147 # helpful, since we need to emit the merge if a file
148 148 # in the narrow spec has changed on either side of the
149 149 # merge. As a result, we do a manifest diff to check.
150 150 p2mf = mfl[cl.changelogrevision(ps[1]).manifest].read()
151 151 needed = any(match(f) for f in curmf.diff(p2mf).iterkeys())
152 152 else:
153 153 # For a root node, we need to include the node if any
154 154 # files in the node match the narrowspec.
155 155 needed = any(match(f) for f in curmf)
156 156
157 157 if needed:
158 158 for head in ellipsisheads[rev]:
159 159 addroot(head, rev)
160 160 for p in ps:
161 161 required.add(p)
162 162 relevant_nodes.add(cl.node(rev))
163 163 else:
164 164 if not ps:
165 165 ps = [nullrev]
166 166 if rev in required:
167 167 for head in ellipsisheads[rev]:
168 168 addroot(head, rev)
169 169 for p in ps:
170 170 ellipsisheads[p].add(rev)
171 171 else:
172 172 for p in ps:
173 173 ellipsisheads[p] |= ellipsisheads[rev]
174 174
175 175 # add common changesets as roots of their reachable ellipsis heads
176 176 for c in commonrevs:
177 177 for head in ellipsisheads[c]:
178 178 addroot(head, c)
179 179 return visitnodes, relevant_nodes, ellipsisroots
180 180
181 181 def _packellipsischangegroup(repo, common, match, relevant_nodes,
182 182 ellipsisroots, visitnodes, depth, source, version):
183 183 if version in ('01', '02'):
184 184 raise error.Abort(
185 185 'ellipsis nodes require at least cg3 on client and server, '
186 186 'but negotiated version %s' % version)
187 187 # We wrap cg1packer.revchunk, using a side channel to pass
188 188 # relevant_nodes into that area. Then if linknode isn't in the
189 189 # set, we know we have an ellipsis node and we should defer
190 190 # sending that node's data. We override close() to detect
191 191 # pending ellipsis nodes and flush them.
192 192 packer = changegroup.getbundler(version, repo)
193 193 # Let the packer have access to the narrow matcher so it can
194 194 # omit filelogs and dirlogs as needed
195 195 packer._narrow_matcher = lambda : match
196 196 # Give the packer the list of nodes which should not be
197 197 # ellipsis nodes. We store this rather than the set of nodes
198 198 # that should be an ellipsis because for very large histories
199 199 # we expect this to be significantly smaller.
200 200 packer.full_nodes = relevant_nodes
201 201 # Maps ellipsis revs to their roots at the changelog level.
202 202 packer.precomputed_ellipsis = ellipsisroots
203 203 # Maps CL revs to per-revlog revisions. Cleared in close() at
204 204 # the end of each group.
205 205 packer.clrev_to_localrev = {}
206 206 packer.next_clrev_to_localrev = {}
207 207 # Maps changelog nodes to changelog revs. Filled in once
208 208 # during changelog stage and then left unmodified.
209 209 packer.clnode_to_rev = {}
210 210 packer.changelog_done = False
211 211 # If true, informs the packer that it is serving shallow content and might
212 212 # need to pack file contents not introduced by the changes being packed.
213 213 packer.is_shallow = depth is not None
214 214
215 215 return packer.generate(common, visitnodes, False, source)
216 216
217 217 # Serve a changegroup for a client with a narrow clone.
218 218 def getbundlechangegrouppart_narrow(bundler, repo, source,
219 219 bundlecaps=None, b2caps=None, heads=None,
220 220 common=None, **kwargs):
221 221 cgversions = b2caps.get('changegroup')
222 222 getcgkwargs = {}
223 223 if cgversions: # 3.1 and 3.2 ship with an empty value
224 224 cgversions = [v for v in cgversions
225 225 if v in changegroup.supportedoutgoingversions(repo)]
226 226 if not cgversions:
227 227 raise ValueError(_('no common changegroup version'))
228 228 version = getcgkwargs['version'] = max(cgversions)
229 229 else:
230 230 raise ValueError(_("server does not advertise changegroup version,"
231 231 " can't negotiate support for ellipsis nodes"))
232 232
233 233 include = sorted(filter(bool, kwargs.get('includepats', [])))
234 234 exclude = sorted(filter(bool, kwargs.get('excludepats', [])))
235 235 newmatch = narrowspec.match(repo.root, include=include, exclude=exclude)
236 236 if not repo.ui.configbool("experimental", "narrowservebrokenellipses"):
237 237 outgoing = exchange._computeoutgoing(repo, heads, common)
238 238 if not outgoing.missing:
239 239 return
240 240 def wrappedgetbundler(orig, *args, **kwargs):
241 241 bundler = orig(*args, **kwargs)
242 242 bundler._narrow_matcher = lambda : newmatch
243 243 return bundler
244 244 with extensions.wrappedfunction(changegroup, 'getbundler',
245 245 wrappedgetbundler):
246 246 cg = changegroup.makestream(repo, outgoing, version, source)
247 247 part = bundler.newpart('changegroup', data=cg)
248 248 part.addparam('version', version)
249 249 if 'treemanifest' in repo.requirements:
250 250 part.addparam('treemanifest', '1')
251 251
252 252 if include or exclude:
253 253 narrowspecpart = bundler.newpart(_SPECPART)
254 254 if include:
255 255 narrowspecpart.addparam(
256 256 _SPECPART_INCLUDE, '\n'.join(include), mandatory=True)
257 257 if exclude:
258 258 narrowspecpart.addparam(
259 259 _SPECPART_EXCLUDE, '\n'.join(exclude), mandatory=True)
260 260
261 261 return
262 262
263 263 depth = kwargs.get('depth', None)
264 264 if depth is not None:
265 265 depth = int(depth)
266 266 if depth < 1:
267 267 raise error.Abort(_('depth must be positive, got %d') % depth)
268 268
269 269 heads = set(heads or repo.heads())
270 270 common = set(common or [nullid])
271 271 oldinclude = sorted(filter(bool, kwargs.get('oldincludepats', [])))
272 272 oldexclude = sorted(filter(bool, kwargs.get('oldexcludepats', [])))
273 273 known = {bin(n) for n in kwargs.get('known', [])}
274 274 if known and (oldinclude != include or oldexclude != exclude):
275 275 # Steps:
276 276 # 1. Send kill for "$known & ::common"
277 277 #
278 278 # 2. Send changegroup for ::common
279 279 #
280 280 # 3. Proceed.
281 281 #
282 282 # In the future, we can send kills for only the specific
283 283 # nodes we know should go away or change shape, and then
284 284 # send a data stream that tells the client something like this:
285 285 #
286 286 # a) apply this changegroup
287 287 # b) apply nodes XXX, YYY, ZZZ that you already have
288 288 # c) goto a
289 289 #
290 290 # until they've built up the full new state.
291 291 # Convert to revnums and intersect with "common". The client should
292 292 # have made it a subset of "common" already, but let's be safe.
293 293 known = set(repo.revs("%ln & ::%ln", known, common))
294 294 # TODO: we could send only roots() of this set, and the
295 295 # list of nodes in common, and the client could work out
296 296 # what to strip, instead of us explicitly sending every
297 297 # single node.
298 298 deadrevs = known
299 299 def genkills():
300 300 for r in deadrevs:
301 301 yield _KILLNODESIGNAL
302 302 yield repo.changelog.node(r)
303 303 yield _DONESIGNAL
304 304 bundler.newpart(_CHANGESPECPART, data=genkills())
305 305 newvisit, newfull, newellipsis = _computeellipsis(
306 306 repo, set(), common, known, newmatch)
307 307 if newvisit:
308 308 cg = _packellipsischangegroup(
309 309 repo, common, newmatch, newfull, newellipsis,
310 310 newvisit, depth, source, version)
311 311 part = bundler.newpart('changegroup', data=cg)
312 312 part.addparam('version', version)
313 313 if 'treemanifest' in repo.requirements:
314 314 part.addparam('treemanifest', '1')
315 315
316 316 visitnodes, relevant_nodes, ellipsisroots = _computeellipsis(
317 317 repo, common, heads, set(), newmatch, depth=depth)
318 318
319 319 repo.ui.debug('Found %d relevant revs\n' % len(relevant_nodes))
320 320 if visitnodes:
321 321 cg = _packellipsischangegroup(
322 322 repo, common, newmatch, relevant_nodes, ellipsisroots,
323 323 visitnodes, depth, source, version)
324 324 part = bundler.newpart('changegroup', data=cg)
325 325 part.addparam('version', version)
326 326 if 'treemanifest' in repo.requirements:
327 327 part.addparam('treemanifest', '1')
328 328
329 329 def applyacl_narrow(repo, kwargs):
330 330 username = repo.ui.shortuser(repo.ui.username())
331 331 user_includes = repo.ui.configlist(
332 332 _NARROWACL_SECTION, username + '.includes',
333 333 repo.ui.configlist(_NARROWACL_SECTION, 'default.includes'))
334 334 user_excludes = repo.ui.configlist(
335 335 _NARROWACL_SECTION, username + '.excludes',
336 336 repo.ui.configlist(_NARROWACL_SECTION, 'default.excludes'))
337 337 if not user_includes:
338 338 raise error.Abort(_("{} configuration for user {} is empty")
339 339 .format(_NARROWACL_SECTION, username))
340 340
341 341 user_includes = [
342 342 'path:.' if p == '*' else 'path:' + p for p in user_includes]
343 343 user_excludes = [
344 344 'path:.' if p == '*' else 'path:' + p for p in user_excludes]
345 345
346 346 req_includes = set(kwargs.get('includepats', []))
347 347 req_excludes = set(kwargs.get('excludepats', []))
348 348
349 349 invalid_includes = []
350 350 req_includes, req_excludes = narrowspec.restrictpatterns(
351 351 req_includes, req_excludes,
352 352 user_includes, user_excludes, invalid_includes)
353 353
354 354 if invalid_includes:
355 355 raise error.Abort(
356 356 _("The following includes are not accessible for {}: {}")
357 357 .format(username, invalid_includes))
358 358
359 359 new_args = {}
360 360 new_args.update(kwargs)
361 361 new_args['includepats'] = req_includes
362 362 if req_excludes:
363 363 new_args['excludepats'] = req_excludes
364 364 return new_args
365 365
366 366 @bundle2.parthandler(_SPECPART, (_SPECPART_INCLUDE, _SPECPART_EXCLUDE))
367 367 def _handlechangespec_2(op, inpart):
368 368 includepats = set(inpart.params.get(_SPECPART_INCLUDE, '').splitlines())
369 369 excludepats = set(inpart.params.get(_SPECPART_EXCLUDE, '').splitlines())
370 370 narrowspec.save(op.repo, includepats, excludepats)
371 if not narrowrepo.requirement in op.repo.requirements:
372 op.repo.requirements.add(narrowrepo.requirement)
371 if not narrowrepo.REQUIREMENT in op.repo.requirements:
372 op.repo.requirements.add(narrowrepo.REQUIREMENT)
373 373 op.repo._writerequirements()
374 374 op.repo.invalidate(clearfilecache=True)
375 375
376 376 @bundle2.parthandler(_CHANGESPECPART)
377 377 def _handlechangespec(op, inpart):
378 378 repo = op.repo
379 379 cl = repo.changelog
380 380
381 381 # changesets which need to be stripped entirely. either they're no longer
382 382 # needed in the new narrow spec, or the server is sending a replacement
383 383 # in the changegroup part.
384 384 clkills = set()
385 385
386 386 # A changespec part contains all the updates to ellipsis nodes
387 387 # that will happen as a result of widening or narrowing a
388 388 # repo. All the changes that this block encounters are ellipsis
389 389 # nodes or flags to kill an existing ellipsis.
390 390 chunksignal = changegroup.readexactly(inpart, 4)
391 391 while chunksignal != _DONESIGNAL:
392 392 if chunksignal == _KILLNODESIGNAL:
393 393 # a node used to be an ellipsis but isn't anymore
394 394 ck = changegroup.readexactly(inpart, 20)
395 395 if cl.hasnode(ck):
396 396 clkills.add(ck)
397 397 else:
398 398 raise error.Abort(
399 399 _('unexpected changespec node chunk type: %s') % chunksignal)
400 400 chunksignal = changegroup.readexactly(inpart, 4)
401 401
402 402 if clkills:
403 403 # preserve bookmarks that repair.strip() would otherwise strip
404 404 bmstore = repo._bookmarks
405 405 class dummybmstore(dict):
406 406 def applychanges(self, repo, tr, changes):
407 407 pass
408 408 def recordchange(self, tr): # legacy version
409 409 pass
410 410 repo._bookmarks = dummybmstore()
411 411 chgrpfile = repair.strip(op.ui, repo, list(clkills), backup=True,
412 412 topic='widen')
413 413 repo._bookmarks = bmstore
414 414 if chgrpfile:
415 415 # presence of _widen_bundle attribute activates widen handler later
416 416 op._widen_bundle = chgrpfile
417 417 # Set the new narrowspec if we're widening. The setnewnarrowpats() method
418 418 # will currently always be there when using the core+narrowhg server, but
419 419 # other servers may include a changespec part even when not widening (e.g.
420 420 # because we're deepening a shallow repo).
421 421 if util.safehasattr(repo, 'setnewnarrowpats'):
422 422 repo.setnewnarrowpats()
423 423
424 424 def handlechangegroup_widen(op, inpart):
425 425 """Changegroup exchange handler which restores temporarily-stripped nodes"""
426 426 # We saved a bundle with stripped node data we must now restore.
427 427 # This approach is based on mercurial/repair.py@6ee26a53c111.
428 428 repo = op.repo
429 429 ui = op.ui
430 430
431 431 chgrpfile = op._widen_bundle
432 432 del op._widen_bundle
433 433 vfs = repo.vfs
434 434
435 435 ui.note(_("adding branch\n"))
436 436 f = vfs.open(chgrpfile, "rb")
437 437 try:
438 438 gen = exchange.readbundle(ui, f, chgrpfile, vfs)
439 439 if not ui.verbose:
440 440 # silence internal shuffling chatter
441 441 ui.pushbuffer()
442 442 if isinstance(gen, bundle2.unbundle20):
443 443 with repo.transaction('strip') as tr:
444 444 bundle2.processbundle(repo, gen, lambda: tr)
445 445 else:
446 446 gen.apply(repo, 'strip', 'bundle:' + vfs.join(chgrpfile), True)
447 447 if not ui.verbose:
448 448 ui.popbuffer()
449 449 finally:
450 450 f.close()
451 451
452 452 # remove undo files
453 453 for undovfs, undofile in repo.undofiles():
454 454 try:
455 455 undovfs.unlink(undofile)
456 456 except OSError as e:
457 457 if e.errno != errno.ENOENT:
458 458 ui.warn(_('error removing %s: %s\n') %
459 459 (undovfs.join(undofile), str(e)))
460 460
461 461 # Remove partial backup only if there were no exceptions
462 462 vfs.unlink(chgrpfile)
463 463
464 464 def setup():
465 465 """Enable narrow repo support in bundle2-related extension points."""
466 466 extensions.wrapfunction(bundle2, 'getrepocaps', getrepocaps_narrow)
467 467
468 468 wireproto.gboptsmap['narrow'] = 'boolean'
469 469 wireproto.gboptsmap['depth'] = 'plain'
470 470 wireproto.gboptsmap['oldincludepats'] = 'csv'
471 471 wireproto.gboptsmap['oldexcludepats'] = 'csv'
472 472 wireproto.gboptsmap['includepats'] = 'csv'
473 473 wireproto.gboptsmap['excludepats'] = 'csv'
474 474 wireproto.gboptsmap['known'] = 'csv'
475 475
476 476 # Extend changegroup serving to handle requests from narrow clients.
477 477 origcgfn = exchange.getbundle2partsmapping['changegroup']
478 478 def wrappedcgfn(*args, **kwargs):
479 479 repo = args[1]
480 480 if repo.ui.has_section(_NARROWACL_SECTION):
481 481 getbundlechangegrouppart_narrow(
482 482 *args, **applyacl_narrow(repo, kwargs))
483 483 elif kwargs.get('narrow', False):
484 484 getbundlechangegrouppart_narrow(*args, **kwargs)
485 485 else:
486 486 origcgfn(*args, **kwargs)
487 487 exchange.getbundle2partsmapping['changegroup'] = wrappedcgfn
488 488
489 489 # Extend changegroup receiver so client can fixup after widen requests.
490 490 origcghandler = bundle2.parthandlermapping['changegroup']
491 491 def wrappedcghandler(op, inpart):
492 492 origcghandler(op, inpart)
493 493 if util.safehasattr(op, '_widen_bundle'):
494 494 handlechangegroup_widen(op, inpart)
495 495 wrappedcghandler.params = origcghandler.params
496 496 bundle2.parthandlermapping['changegroup'] = wrappedcghandler
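One detail worth calling out from the narrow:spec part handling above: addparam() stores the include and exclude patterns newline-joined, and _handlechangespec_2() recovers them with splitlines(). The round trip, reduced to plain Python (not bundle2 itself):

    def encodepats(patterns):
        # mirrors '\n'.join(include) in getbundlechangegrouppart_narrow
        return '\n'.join(sorted(p for p in patterns if p))

    def decodepats(value):
        # mirrors inpart.params.get(...).splitlines() in the part handler
        return set(value.splitlines())

    include = {'path:src', 'path:docs'}
    assert decodepats(encodepats(include)) == include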
@@ -1,385 +1,385 @@
1 1 # narrowchangegroup.py - narrow clone changegroup creation and consumption
2 2 #
3 3 # Copyright 2017 Google, Inc.
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 from mercurial.i18n import _
11 11 from mercurial import (
12 12 changegroup,
13 13 error,
14 14 extensions,
15 15 manifest,
16 16 mdiff,
17 17 node,
18 18 util,
19 19 )
20 20
21 21 from . import (
22 22 narrowrepo,
23 23 narrowrevlog,
24 24 )
25 25
26 26 def setup():
27 27
28 28 def supportedoutgoingversions(orig, repo):
29 29 versions = orig(repo)
30 if narrowrepo.requirement in repo.requirements:
30 if narrowrepo.REQUIREMENT in repo.requirements:
31 31 versions.discard('01')
32 32 versions.discard('02')
33 33 return versions
34 34
35 35 extensions.wrapfunction(changegroup, 'supportedoutgoingversions',
36 36 supportedoutgoingversions)
37 37
38 38 def prune(orig, self, revlog, missing, commonrevs):
39 39 if isinstance(revlog, manifest.manifestrevlog):
40 40 matcher = getattr(self._repo, 'narrowmatch',
41 41 getattr(self, '_narrow_matcher', None))
42 42 if (matcher is not None and
43 43 not matcher().visitdir(revlog._dir[:-1] or '.')):
44 44 return []
45 45 return orig(self, revlog, missing, commonrevs)
46 46
47 47 extensions.wrapfunction(changegroup.cg1packer, 'prune', prune)
48 48
49 49 def generatefiles(orig, self, changedfiles, linknodes, commonrevs,
50 50 source):
51 51 matcher = getattr(self._repo, 'narrowmatch',
52 52 getattr(self, '_narrow_matcher', None))
53 53 if matcher is not None:
54 54 narrowmatch = matcher()
55 55 changedfiles = filter(narrowmatch, changedfiles)
56 56 if getattr(self, 'is_shallow', False):
57 57 # See comment in generate() for why this sadness is a thing.
58 58 mfdicts = self._mfdicts
59 59 del self._mfdicts
60 60 # In a shallow clone, the linknodes callback needs to also include
61 61 # those file nodes that are in the manifests we sent but weren't
62 62 # introduced by those manifests.
63 63 commonctxs = [self._repo[c] for c in commonrevs]
64 64 oldlinknodes = linknodes
65 65 clrev = self._repo.changelog.rev
66 66 def linknodes(flog, fname):
67 67 for c in commonctxs:
68 68 try:
69 69 fnode = c.filenode(fname)
70 70 self.clrev_to_localrev[c.rev()] = flog.rev(fnode)
71 71 except error.ManifestLookupError:
72 72 pass
73 73 links = oldlinknodes(flog, fname)
74 74 if len(links) != len(mfdicts):
75 75 for mf, lr in mfdicts:
76 76 fnode = mf.get(fname, None)
77 77 if fnode in links:
78 78 links[fnode] = min(links[fnode], lr, key=clrev)
79 79 elif fnode:
80 80 links[fnode] = lr
81 81 return links
82 82 return orig(self, changedfiles, linknodes, commonrevs, source)
83 83 extensions.wrapfunction(
84 84 changegroup.cg1packer, 'generatefiles', generatefiles)
85 85
86 86 def ellipsisdata(packer, rev, revlog, p1, p2, data, linknode):
87 87 n = revlog.node(rev)
88 88 p1n, p2n = revlog.node(p1), revlog.node(p2)
89 89 flags = revlog.flags(rev)
90 90 flags |= narrowrevlog.ELLIPSIS_NODE_FLAG
91 91 meta = packer.builddeltaheader(
92 92 n, p1n, p2n, node.nullid, linknode, flags)
93 93 # TODO: try and actually send deltas for ellipsis data blocks
94 94 diffheader = mdiff.trivialdiffheader(len(data))
95 95 l = len(meta) + len(diffheader) + len(data)
96 96 return ''.join((changegroup.chunkheader(l),
97 97 meta,
98 98 diffheader,
99 99 data))
100 100
101 101 def close(orig, self):
102 102 getattr(self, 'clrev_to_localrev', {}).clear()
103 103 if getattr(self, 'next_clrev_to_localrev', {}):
104 104 self.clrev_to_localrev = self.next_clrev_to_localrev
105 105 del self.next_clrev_to_localrev
106 106 self.changelog_done = True
107 107 return orig(self)
108 108 extensions.wrapfunction(changegroup.cg1packer, 'close', close)
109 109
110 110 # In a perfect world, we'd generate better ellipsis-ified graphs
111 111 # for non-changelog revlogs. In practice, we haven't started doing
112 112 # that yet, so the resulting DAGs for the manifestlog and filelogs
113 113 # are actually full of bogus parentage on all the ellipsis
114 114 # nodes. This has the side effect that, while the contents are
115 115 # correct, the individual DAGs might be completely out of whack in
116 116 # a case like 882681bc3166 and its ancestors (back about 10
117 117 # revisions or so) in the main hg repo.
118 118 #
119 119 # The one invariant we *know* holds is that the new (potentially
120 120 # bogus) DAG shape will be valid if we order the nodes in the
121 121 # order that they're introduced in dramatis personae by the
122 122 # changelog, so what we do is we sort the non-changelog histories
123 123 # by the order in which they are used by the changelog.
124 124 def _sortgroup(orig, self, revlog, nodelist, lookup):
125 125 if not util.safehasattr(self, 'full_nodes') or not self.clnode_to_rev:
126 126 return orig(self, revlog, nodelist, lookup)
127 127 key = lambda n: self.clnode_to_rev[lookup(n)]
128 128 return [revlog.rev(n) for n in sorted(nodelist, key=key)]
129 129
130 130 extensions.wrapfunction(changegroup.cg1packer, '_sortgroup', _sortgroup)
131 131
132 132 def generate(orig, self, commonrevs, clnodes, fastpathlinkrev, source):
133 133 '''yield a sequence of changegroup chunks (strings)'''
134 134 # Note: other than delegating to orig, the only deviation in
135 135 # logic from normal hg's generate is marked with BEGIN/END
136 136 # NARROW HACK.
137 137 if not util.safehasattr(self, 'full_nodes'):
138 138 # not sending a narrow bundle
139 139 for x in orig(self, commonrevs, clnodes, fastpathlinkrev, source):
140 140 yield x
141 141 return
142 142
143 143 repo = self._repo
144 144 cl = repo.changelog
145 145 mfl = repo.manifestlog
146 146 mfrevlog = mfl._revlog
147 147
148 148 clrevorder = {}
149 149 mfs = {} # needed manifests
150 150 fnodes = {} # needed file nodes
151 151 changedfiles = set()
152 152
153 153 # Callback for the changelog, used to collect changed files and manifest
154 154 # nodes.
155 155 # Returns the linkrev node (identity in the changelog case).
156 156 def lookupcl(x):
157 157 c = cl.read(x)
158 158 clrevorder[x] = len(clrevorder)
159 159 # BEGIN NARROW HACK
160 160 #
161 161 # Only update mfs if x is going to be sent. Otherwise we
162 162 # end up with bogus linkrevs specified for manifests and
163 163 # we skip some manifest nodes that we should otherwise
164 164 # have sent.
165 165 if x in self.full_nodes or cl.rev(x) in self.precomputed_ellipsis:
166 166 n = c[0]
167 167 # record the first changeset introducing this manifest version
168 168 mfs.setdefault(n, x)
169 169 # Set this narrow-specific dict so we have the lowest manifest
170 170 # revnum to look up for this cl revnum. (Part of mapping
171 171 # changelog ellipsis parents to manifest ellipsis parents)
172 172 self.next_clrev_to_localrev.setdefault(cl.rev(x),
173 173 mfrevlog.rev(n))
174 174 # We can't trust the changed files list in the changeset if the
175 175 # client requested a shallow clone.
176 176 if self.is_shallow:
177 177 changedfiles.update(mfl[c[0]].read().keys())
178 178 else:
179 179 changedfiles.update(c[3])
180 180 # END NARROW HACK
181 181 # Record a complete list of potentially-changed files in
182 182 # this manifest.
183 183 return x
184 184
185 185 self._verbosenote(_('uncompressed size of bundle content:\n'))
186 186 size = 0
187 187 for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets')):
188 188 size += len(chunk)
189 189 yield chunk
190 190 self._verbosenote(_('%8.i (changelog)\n') % size)
191 191
192 192 # We need to make sure that the linkrev in the changegroup refers to
193 193 # the first changeset that introduced the manifest or file revision.
194 194 # The fastpath is usually safer than the slowpath, because the filelogs
195 195 # are walked in revlog order.
196 196 #
197 197 # When taking the slowpath with reorder=None and the manifest revlog
198 198 # uses generaldelta, the manifest may be walked in the "wrong" order.
199 199 # Without 'clrevorder', we would get an incorrect linkrev (see fix in
200 200 # cc0ff93d0c0c).
201 201 #
202 202 # When taking the fastpath, we are only vulnerable to reordering
203 203 # of the changelog itself. The changelog never uses generaldelta, so
204 204 # it is only reordered when reorder=True. To handle this case, we
205 205 # simply take the slowpath, which already has the 'clrevorder' logic.
206 206 # This was also fixed in cc0ff93d0c0c.
207 207 fastpathlinkrev = fastpathlinkrev and not self._reorder
208 208 # Treemanifests don't work correctly with fastpathlinkrev
209 209 # either, because we don't discover which directory nodes to
210 210 # send along with files. This could probably be fixed.
211 211 fastpathlinkrev = fastpathlinkrev and (
212 212 'treemanifest' not in repo.requirements)
213 213 # Shallow clones also don't work correctly with fastpathlinkrev
214 214 # because file nodes may need to be sent for a manifest even if they
215 215 # weren't introduced by that manifest.
216 216 fastpathlinkrev = fastpathlinkrev and not self.is_shallow
217 217
218 218 moreargs = []
219 219 if self.generatemanifests.func_code.co_argcount == 7:
220 220 # The source argument was added to generatemanifests in hg in
221 221 # 75cc1f1e11f2 (2017/09/11).
222 222 moreargs.append(source)
223 223 for chunk in self.generatemanifests(commonrevs, clrevorder,
224 224 fastpathlinkrev, mfs, fnodes, *moreargs):
225 225 yield chunk
226 226 # BEGIN NARROW HACK
227 227 mfdicts = None
228 228 if self.is_shallow:
229 229 mfdicts = [(self._repo.manifestlog[n].read(), lr)
230 230 for (n, lr) in mfs.iteritems()]
231 231 # END NARROW HACK
232 232 mfs.clear()
233 233 clrevs = set(cl.rev(x) for x in clnodes)
234 234
235 235 if not fastpathlinkrev:
236 236 def linknodes(unused, fname):
237 237 return fnodes.get(fname, {})
238 238 else:
239 239 cln = cl.node
240 240 def linknodes(filerevlog, fname):
241 241 llr = filerevlog.linkrev
242 242 fln = filerevlog.node
243 243 revs = ((r, llr(r)) for r in filerevlog)
244 244 return dict((fln(r), cln(lr)) for r, lr in revs if lr in clrevs)
245 245
246 246 # BEGIN NARROW HACK
247 247 #
248 248 # We need to pass the mfdicts variable down into
249 249 # generatefiles(), but more than one command might have
250 250 # wrapped generatefiles so we can't modify the function
251 251 # signature. Instead, we pass the data to ourselves using an
252 252 # instance attribute. I'm sorry.
253 253 self._mfdicts = mfdicts
254 254 # END NARROW HACK
255 255 for chunk in self.generatefiles(changedfiles, linknodes, commonrevs,
256 256 source):
257 257 yield chunk
258 258
259 259 yield self.close()
260 260
261 261 if clnodes:
262 262 repo.hook('outgoing', node=node.hex(clnodes[0]), source=source)
263 263 extensions.wrapfunction(changegroup.cg1packer, 'generate', generate)
264 264
265 265 def revchunk(orig, self, revlog, rev, prev, linknode):
266 266 if not util.safehasattr(self, 'full_nodes'):
267 267 # not sending a narrow changegroup
268 268 for x in orig(self, revlog, rev, prev, linknode):
269 269 yield x
270 270 return
271 271 # build up some mapping information that's useful later. See
272 272 # the local() nested function below.
273 273 if not self.changelog_done:
274 274 self.clnode_to_rev[linknode] = rev
275 275 linkrev = rev
276 276 self.clrev_to_localrev[linkrev] = rev
277 277 else:
278 278 linkrev = self.clnode_to_rev[linknode]
279 279 self.clrev_to_localrev[linkrev] = rev
280 280 # This is a node to send in full, because the changeset it
281 281 # corresponds to was a full changeset.
282 282 if linknode in self.full_nodes:
283 283 for x in orig(self, revlog, rev, prev, linknode):
284 284 yield x
285 285 return
286 286 # At this point, a node can either be one we should skip or an
287 287 # ellipsis. If it's not an ellipsis, bail immediately.
288 288 if linkrev not in self.precomputed_ellipsis:
289 289 return
290 290 linkparents = self.precomputed_ellipsis[linkrev]
291 291 def local(clrev):
292 292 """Turn a changelog revnum into a local revnum.
293 293
294 294 The ellipsis dag is stored as revnums on the changelog,
295 295 but when we're producing ellipsis entries for
296 296 non-changelog revlogs, we need to turn those numbers into
297 297 something local. This does that for us, and during the
298 298 changelog sending phase will also expand the stored
299 299 mappings as needed.
300 300 """
301 301 if clrev == node.nullrev:
302 302 return node.nullrev
303 303 if not self.changelog_done:
304 304 # If we're doing the changelog, it's possible that we
305 305 # have a parent that is already on the client, and we
306 306 # need to store some extra mapping information so that
307 307 # our contained ellipsis nodes will be able to resolve
308 308 # their parents.
309 309 if clrev not in self.clrev_to_localrev:
310 310 clnode = revlog.node(clrev)
311 311 self.clnode_to_rev[clnode] = clrev
312 312 return clrev
313 313 # Walk the ellipsis-ized changelog breadth-first looking for a
314 314 # change that has been linked from the current revlog.
315 315 #
316 316 # For a flat manifest revlog only a single step should be necessary
317 317 # as all relevant changelog entries are relevant to the flat
318 318 # manifest.
319 319 #
320 320 # For a filelog or tree manifest dirlog however not every changelog
321 321 # entry will have been relevant, so we need to skip some changelog
322 322 # nodes even after ellipsis-izing.
323 323 walk = [clrev]
324 324 while walk:
325 325 p = walk[0]
326 326 walk = walk[1:]
327 327 if p in self.clrev_to_localrev:
328 328 return self.clrev_to_localrev[p]
329 329 elif p in self.full_nodes:
330 330 walk.extend([pp for pp in self._repo.changelog.parentrevs(p)
331 331 if pp != node.nullrev])
332 332 elif p in self.precomputed_ellipsis:
333 333 walk.extend([pp for pp in self.precomputed_ellipsis[p]
334 334 if pp != node.nullrev])
335 335 else:
336 336 # In this case, we've got an ellipsis with parents
337 337 # outside the current bundle (likely an
338 338 # incremental pull). We "know" that we can use the
339 339 # value of this same revlog at whatever revision
340 340 # is pointed to by linknode. "Know" is in scare
341 341 # quotes because I haven't done enough examination
342 342 # of edge cases to convince myself this is really
343 343 # a fact - it works for all the (admittedly
344 344 # thorough) cases in our testsuite, but I would be
345 345 # somewhat unsurprised to find a case in the wild
346 346 # where this breaks down a bit. That said, I don't
347 347 # know if it would hurt anything.
348 348 for i in xrange(rev, 0, -1):
349 349 if revlog.linkrev(i) == clrev:
350 350 return i
351 351 # We failed to resolve a parent for this node, so
352 352 # we crash the changegroup construction.
353 353 raise error.Abort(
354 354 'unable to resolve parent while packing %r %r'
355 355 ' for changeset %r' % (revlog.indexfile, rev, clrev))
356 356 return node.nullrev
357 357
358 358 if not linkparents or (
359 359 revlog.parentrevs(rev) == (node.nullrev, node.nullrev)):
360 360 p1, p2 = node.nullrev, node.nullrev
361 361 elif len(linkparents) == 1:
362 362 p1, = sorted(local(p) for p in linkparents)
363 363 p2 = node.nullrev
364 364 else:
365 365 p1, p2 = sorted(local(p) for p in linkparents)
366 366 yield ellipsisdata(
367 367 self, rev, revlog, p1, p2, revlog.revision(rev), linknode)
368 368 extensions.wrapfunction(changegroup.cg1packer, 'revchunk', revchunk)
369 369
370 370 def deltaparent(orig, self, revlog, rev, p1, p2, prev):
371 371 if util.safehasattr(self, 'full_nodes'):
372 372 # TODO: send better deltas when in narrow mode.
373 373 #
374 374 # changegroup.group() loops over revisions to send,
375 375 # including revisions we'll skip. What this means is that
376 376 # `prev` will be a potentially useless delta base for all
377 377 # ellipsis nodes, as the client likely won't have it. In
378 378 # the future we should do bookkeeping about which nodes
379 379 # have been sent to the client, and try to be
380 380 # significantly smarter about delta bases. This is
381 381 # slightly tricky because this same code has to work for
382 382 # all revlogs, and we don't have the linkrev/linknode here.
383 383 return p1
384 384 return orig(self, revlog, rev, p1, p2, prev)
385 385 extensions.wrapfunction(changegroup.cg2packer, 'deltaparent', deltaparent)
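The _sortgroup override above leans on a single invariant: the rewritten (ellipsis) DAG stays valid as long as non-changelog nodes are emitted in the order their linked changesets appear in the changelog. A toy version of that ordering, with invented node names:

    clnode_to_rev = {'cl-a': 0, 'cl-b': 1, 'cl-c': 2}       # changelog node -> rev
    linknodes = {'f0': 'cl-a', 'f1': 'cl-b', 'f2': 'cl-c'}  # filelog node -> changelog node

    def sortgroup(nodelist, lookup):
        # same key function as the wrapped _sortgroup
        return sorted(nodelist, key=lambda n: clnode_to_rev[lookup(n)])

    assert sortgroup(['f2', 'f0', 'f1'], linknodes.get) == ['f0', 'f1', 'f2']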
@@ -1,402 +1,402 @@
1 1 # narrowcommands.py - command modifications for narrowhg extension
2 2 #
3 3 # Copyright 2017 Google, Inc.
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 from __future__ import absolute_import
8 8
9 9 import itertools
10 10
11 11 from mercurial.i18n import _
12 12 from mercurial import (
13 13 cmdutil,
14 14 commands,
15 15 discovery,
16 16 error,
17 17 exchange,
18 18 extensions,
19 19 hg,
20 20 merge,
21 21 node,
22 22 registrar,
23 23 repair,
24 24 repoview,
25 25 util,
26 26 )
27 27
28 28 from . import (
29 29 narrowbundle2,
30 30 narrowrepo,
31 31 narrowspec,
32 32 )
33 33
34 34 table = {}
35 35 command = registrar.command(table)
36 36
37 37 def setup():
38 38 """Wraps user-facing mercurial commands with narrow-aware versions."""
39 39
40 40 entry = extensions.wrapcommand(commands.table, 'clone', clonenarrowcmd)
41 41 entry[1].append(('', 'narrow', None,
42 42 _("create a narrow clone of select files")))
43 43 entry[1].append(('', 'depth', '',
44 44 _("limit the history fetched by distance from heads")))
45 45 # TODO(durin42): unify sparse/narrow --include/--exclude logic a bit
46 46 if 'sparse' not in extensions.enabled():
47 47 entry[1].append(('', 'include', [],
48 48 _("specifically fetch this file/directory")))
49 49 entry[1].append(
50 50 ('', 'exclude', [],
51 51 _("do not fetch this file/directory, even if included")))
52 52
53 53 entry = extensions.wrapcommand(commands.table, 'pull', pullnarrowcmd)
54 54 entry[1].append(('', 'depth', '',
55 55 _("limit the history fetched by distance from heads")))
56 56
57 57 extensions.wrapcommand(commands.table, 'archive', archivenarrowcmd)
58 58
59 59 def expandpull(pullop, includepats, excludepats):
60 60 if not narrowspec.needsexpansion(includepats):
61 61 return includepats, excludepats
62 62
63 63 heads = pullop.heads or pullop.rheads
64 64 includepats, excludepats = pullop.remote.expandnarrow(
65 65 includepats, excludepats, heads)
66 66 pullop.repo.ui.debug('Expanded narrowspec to inc=%s, exc=%s\n' % (
67 67 includepats, excludepats))
68 68 return set(includepats), set(excludepats)
69 69
70 70 def clonenarrowcmd(orig, ui, repo, *args, **opts):
71 71 """Wraps clone command, so 'hg clone' first wraps localrepo.clone()."""
72 72 wrappedextraprepare = util.nullcontextmanager()
73 73 opts_narrow = opts['narrow']
74 74 if opts_narrow:
75 75 def pullbundle2extraprepare_widen(orig, pullop, kwargs):
76 76 # Create narrow spec patterns from clone flags
77 77 includepats = narrowspec.parsepatterns(opts['include'])
78 78 excludepats = narrowspec.parsepatterns(opts['exclude'])
79 79
80 80 # If necessary, ask the server to expand the narrowspec.
81 81 includepats, excludepats = expandpull(
82 82 pullop, includepats, excludepats)
83 83
84 84 if not includepats and excludepats:
85 85 # If nothing was included, we assume the user meant to include
86 86 # everything, except what they asked to exclude.
87 87 includepats = {'path:.'}
88 88
89 89 narrowspec.save(pullop.repo, includepats, excludepats)
90 90
91 91 # This will populate 'includepats' etc with the values from the
92 92 # narrowspec we just saved.
93 93 orig(pullop, kwargs)
94 94
95 95 if opts.get('depth'):
96 96 kwargs['depth'] = opts['depth']
97 97 wrappedextraprepare = extensions.wrappedfunction(exchange,
98 98 '_pullbundle2extraprepare', pullbundle2extraprepare_widen)
99 99
100 100 def pullnarrow(orig, repo, *args, **kwargs):
101 101 narrowrepo.wraprepo(repo.unfiltered(), opts_narrow)
102 102 if isinstance(repo, repoview.repoview):
103 103 repo.__class__.__bases__ = (repo.__class__.__bases__[0],
104 104 repo.unfiltered().__class__)
105 105 if opts_narrow:
106 repo.requirements.add(narrowrepo.requirement)
106 repo.requirements.add(narrowrepo.REQUIREMENT)
107 107 repo._writerequirements()
108 108
109 109 return orig(repo, *args, **kwargs)
110 110
111 111 wrappedpull = extensions.wrappedfunction(exchange, 'pull', pullnarrow)
112 112
113 113 with wrappedextraprepare, wrappedpull:
114 114 return orig(ui, repo, *args, **opts)
115 115
116 116 def pullnarrowcmd(orig, ui, repo, *args, **opts):
117 117 """Wraps pull command to allow modifying narrow spec."""
118 118 wrappedextraprepare = util.nullcontextmanager()
119 if narrowrepo.requirement in repo.requirements:
119 if narrowrepo.REQUIREMENT in repo.requirements:
120 120
121 121 def pullbundle2extraprepare_widen(orig, pullop, kwargs):
122 122 orig(pullop, kwargs)
123 123 if opts.get('depth'):
124 124 kwargs['depth'] = opts['depth']
125 125 wrappedextraprepare = extensions.wrappedfunction(exchange,
126 126 '_pullbundle2extraprepare', pullbundle2extraprepare_widen)
127 127
128 128 with wrappedextraprepare:
129 129 return orig(ui, repo, *args, **opts)
130 130
131 131 def archivenarrowcmd(orig, ui, repo, *args, **opts):
132 132 """Wraps archive command to narrow the default includes."""
133 if narrowrepo.requirement in repo.requirements:
133 if narrowrepo.REQUIREMENT in repo.requirements:
134 134 repo_includes, repo_excludes = repo.narrowpats
135 135 includes = set(opts.get('include', []))
136 136 excludes = set(opts.get('exclude', []))
137 137 includes, excludes = narrowspec.restrictpatterns(
138 138 includes, excludes, repo_includes, repo_excludes)
139 139 if includes:
140 140 opts['include'] = includes
141 141 if excludes:
142 142 opts['exclude'] = excludes
143 143 return orig(ui, repo, *args, **opts)
144 144
145 145 def pullbundle2extraprepare(orig, pullop, kwargs):
146 146 repo = pullop.repo
147 if narrowrepo.requirement not in repo.requirements:
147 if narrowrepo.REQUIREMENT not in repo.requirements:
148 148 return orig(pullop, kwargs)
149 149
150 150 if narrowbundle2.NARROWCAP not in pullop.remotebundle2caps:
151 151 raise error.Abort(_("server doesn't support narrow clones"))
152 152 orig(pullop, kwargs)
153 153 kwargs['narrow'] = True
154 154 include, exclude = repo.narrowpats
155 155 kwargs['oldincludepats'] = include
156 156 kwargs['oldexcludepats'] = exclude
157 157 kwargs['includepats'] = include
158 158 kwargs['excludepats'] = exclude
159 159 kwargs['known'] = [node.hex(ctx.node()) for ctx in
160 160 repo.set('::%ln', pullop.common)
161 161 if ctx.node() != node.nullid]
162 162 if not kwargs['known']:
163 163 # Mercurial serialized an empty list as '' and deserializes it as
164 164 # [''], so delete it instead to avoid handling the empty string on the
165 165 # server.
166 166 del kwargs['known']
167 167
168 168 extensions.wrapfunction(exchange,'_pullbundle2extraprepare',
169 169 pullbundle2extraprepare)
170 170
171 171 def _narrow(ui, repo, remote, commoninc, oldincludes, oldexcludes,
172 172 newincludes, newexcludes, force):
173 173 oldmatch = narrowspec.match(repo.root, oldincludes, oldexcludes)
174 174 newmatch = narrowspec.match(repo.root, newincludes, newexcludes)
175 175
176 176 # This is essentially doing "hg outgoing" to find all local-only
177 177 # commits. We will then check that the local-only commits don't
178 178 # have any changes to files that will be untracked.
179 179 unfi = repo.unfiltered()
180 180 outgoing = discovery.findcommonoutgoing(unfi, remote,
181 181 commoninc=commoninc)
182 182 ui.status(_('looking for local changes to affected paths\n'))
183 183 localnodes = []
184 184 for n in itertools.chain(outgoing.missing, outgoing.excluded):
185 185 if any(oldmatch(f) and not newmatch(f) for f in unfi[n].files()):
186 186 localnodes.append(n)
187 187 revstostrip = unfi.revs('descendants(%ln)', localnodes)
188 188 hiddenrevs = repoview.filterrevs(repo, 'visible')
189 189 visibletostrip = list(repo.changelog.node(r)
190 190 for r in (revstostrip - hiddenrevs))
191 191 if visibletostrip:
192 192 ui.status(_('The following changeset(s) or their ancestors have '
193 193 'local changes not on the remote:\n'))
194 194 maxnodes = 10
195 195 if ui.verbose or len(visibletostrip) <= maxnodes:
196 196 for n in visibletostrip:
197 197 ui.status('%s\n' % node.short(n))
198 198 else:
199 199 for n in visibletostrip[:maxnodes]:
200 200 ui.status('%s\n' % node.short(n))
201 201 ui.status(_('...and %d more, use --verbose to list all\n') %
202 202 (len(visibletostrip) - maxnodes))
203 203 if not force:
204 204 raise error.Abort(_('local changes found'),
205 205 hint=_('use --force-delete-local-changes to '
206 206 'ignore'))
207 207
208 208 if revstostrip:
209 209 tostrip = [unfi.changelog.node(r) for r in revstostrip]
210 210 if repo['.'].node() in tostrip:
211 211 # stripping working copy, so move to a different commit first
212 212 urev = max(repo.revs('(::%n) - %ln + null',
213 213 repo['.'].node(), visibletostrip))
214 214 hg.clean(repo, urev)
215 215 repair.strip(ui, unfi, tostrip, topic='narrow')
216 216
217 217 todelete = []
218 218 for f, f2, size in repo.store.datafiles():
219 219 if f.startswith('data/'):
220 220 file = f[5:-2]
221 221 if not newmatch(file):
222 222 todelete.append(f)
223 223 elif f.startswith('meta/'):
224 224 dir = f[5:-13]
225 225 dirs = ['.'] + sorted(util.dirs({dir})) + [dir]
226 226 include = True
227 227 for d in dirs:
228 228 visit = newmatch.visitdir(d)
229 229 if not visit:
230 230 include = False
231 231 break
232 232 if visit == 'all':
233 233 break
234 234 if not include:
235 235 todelete.append(f)
236 236
237 237 repo.destroying()
238 238
239 239 with repo.transaction("narrowing"):
240 240 for f in todelete:
241 241 ui.status(_('deleting %s\n') % f)
242 242 util.unlinkpath(repo.svfs.join(f))
243 243 repo.store.markremoved(f)
244 244
245 245 for f in repo.dirstate:
246 246 if not newmatch(f):
247 247 repo.dirstate.drop(f)
248 248 repo.wvfs.unlinkpath(f)
249 249 repo.setnarrowpats(newincludes, newexcludes)
250 250
251 251 repo.destroyed()
252 252
253 253 def _widen(ui, repo, remote, commoninc, newincludes, newexcludes):
254 254 newmatch = narrowspec.match(repo.root, newincludes, newexcludes)
255 255
256 256 # TODO(martinvonz): Get expansion working with widening/narrowing.
257 257 if narrowspec.needsexpansion(newincludes):
258 258 raise error.Abort('Expansion not yet supported on pull')
259 259
260 260 def pullbundle2extraprepare_widen(orig, pullop, kwargs):
261 261 orig(pullop, kwargs)
262 262 # The old{in,ex}cludepats have already been set by orig()
263 263 kwargs['includepats'] = newincludes
264 264 kwargs['excludepats'] = newexcludes
265 265 wrappedextraprepare = extensions.wrappedfunction(exchange,
266 266 '_pullbundle2extraprepare', pullbundle2extraprepare_widen)
267 267
268 268 # define a function that narrowbundle2 can call after creating the
269 269 # backup bundle, but before applying the bundle from the server
270 270 def setnewnarrowpats():
271 271 repo.setnarrowpats(newincludes, newexcludes)
272 272 repo.setnewnarrowpats = setnewnarrowpats
273 273
274 274 ds = repo.dirstate
275 275 p1, p2 = ds.p1(), ds.p2()
276 276 with ds.parentchange():
277 277 ds.setparents(node.nullid, node.nullid)
278 278 common = commoninc[0]
279 279 with wrappedextraprepare:
280 280 exchange.pull(repo, remote, heads=common)
281 281 with ds.parentchange():
282 282 ds.setparents(p1, p2)
283 283
284 284 actions = {k: [] for k in 'a am f g cd dc r dm dg m e k p pr'.split()}
285 285 addgaction = actions['g'].append
286 286
287 287 mf = repo['.'].manifest().matches(newmatch)
288 288 for f, fn in mf.iteritems():
289 289 if f not in repo.dirstate:
290 290 addgaction((f, (mf.flags(f), False),
291 291 "add from widened narrow clone"))
292 292
293 293 merge.applyupdates(repo, actions, wctx=repo[None],
294 294 mctx=repo['.'], overwrite=False)
295 295 merge.recordupdates(repo, actions, branchmerge=False)
296 296
297 297 # TODO(rdamazio): Make new matcher format and update description
298 298 @command('tracked',
299 299 [('', 'addinclude', [], _('new paths to include')),
300 300 ('', 'removeinclude', [], _('old paths to no longer include')),
301 301 ('', 'addexclude', [], _('new paths to exclude')),
302 302 ('', 'removeexclude', [], _('old paths to no longer exclude')),
303 303 ('', 'clear', False, _('whether to replace the existing narrowspec')),
304 304 ('', 'force-delete-local-changes', False,
305 305 _('forces deletion of local changes when narrowing')),
306 306 ] + commands.remoteopts,
307 307 _('[OPTIONS]... [REMOTE]'),
308 308 inferrepo=True)
309 309 def trackedcmd(ui, repo, remotepath=None, *pats, **opts):
310 310 """show or change the current narrowspec
311 311
312 312 With no argument, shows the current narrowspec entries, one per line. Each
313 313 line will be prefixed with 'I' or 'X' for included or excluded patterns,
314 314 respectively.
315 315
316 316 The narrowspec is comprised of expressions to match remote files and/or
317 317 directories that should be pulled into your client.
318 318 The narrowspec has *include* and *exclude* expressions, with excludes always
319 319 trumping includes: that is, if a file matches an exclude expression, it will
320 320 be excluded even if it also matches an include expression.
321 321 Excluding files that were never included has no effect.
322 322
323 323 Each included or excluded entry is in the format described by
324 324 'hg help patterns'.
325 325
326 326 The options allow you to add or remove included and excluded expressions.
327 327
328 328 If --clear is specified, then all previous includes and excludes are DROPPED
329 329 and replaced by the new ones specified to --addinclude and --addexclude.
330 330 If --clear is specified without any further options, the narrowspec will be
331 331 empty and will not match any files.
332 332 """
333 if narrowrepo.requirement not in repo.requirements:
333 if narrowrepo.REQUIREMENT not in repo.requirements:
334 334 ui.warn(_('The narrow command is only supported on repositories cloned'
335 335 ' with --narrow.\n'))
336 336 return 1
337 337
338 338 # Before supporting it, decide whether "hg tracked --clear" should mean
339 339 # tracking no paths or all paths.
340 340 if opts['clear']:
341 341 ui.warn(_('The --clear option is not yet supported.\n'))
342 342 return 1
343 343
344 344 if narrowspec.needsexpansion(opts['addinclude'] + opts['addexclude']):
345 345 raise error.Abort('Expansion not yet supported on widen/narrow')
346 346
347 347 addedincludes = narrowspec.parsepatterns(opts['addinclude'])
348 348 removedincludes = narrowspec.parsepatterns(opts['removeinclude'])
349 349 addedexcludes = narrowspec.parsepatterns(opts['addexclude'])
350 350 removedexcludes = narrowspec.parsepatterns(opts['removeexclude'])
351 351 widening = addedincludes or removedexcludes
352 352 narrowing = removedincludes or addedexcludes
353 353 only_show = not widening and not narrowing
354 354
355 355 # Only print the current narrowspec.
356 356 if only_show:
357 357 include, exclude = repo.narrowpats
358 358
359 359 ui.pager('tracked')
360 360 fm = ui.formatter('narrow', opts)
361 361 for i in sorted(include):
362 362 fm.startitem()
363 363 fm.write('status', '%s ', 'I', label='narrow.included')
364 364 fm.write('pat', '%s\n', i, label='narrow.included')
365 365 for i in sorted(exclude):
366 366 fm.startitem()
367 367 fm.write('status', '%s ', 'X', label='narrow.excluded')
368 368 fm.write('pat', '%s\n', i, label='narrow.excluded')
369 369 fm.end()
370 370 return 0
371 371
372 372 with repo.wlock(), repo.lock():
373 373 cmdutil.bailifchanged(repo)
374 374
375 375 # Find the revisions we have in common with the remote. These will
376 376 # be used for finding local-only changes for narrowing. They will
377 377 # also define the set of revisions to update for widening.
378 378 remotepath = ui.expandpath(remotepath or 'default')
379 379 url, branches = hg.parseurl(remotepath)
380 380 ui.status(_('comparing with %s\n') % util.hidepassword(url))
381 381 remote = hg.peer(repo, opts, url)
382 382 commoninc = discovery.findcommonincoming(repo, remote)
383 383
384 384 oldincludes, oldexcludes = repo.narrowpats
385 385 if narrowing:
386 386 newincludes = oldincludes - removedincludes
387 387 newexcludes = oldexcludes | addedexcludes
388 388 _narrow(ui, repo, remote, commoninc, oldincludes, oldexcludes,
389 389 newincludes, newexcludes,
390 390 opts['force_delete_local_changes'])
391 391 # _narrow() updated the narrowspec and _widen() below needs to
392 392 # use the updated values as its base (otherwise removed includes
393 393 # and addedexcludes will be lost in the resulting narrowspec)
394 394 oldincludes = newincludes
395 395 oldexcludes = newexcludes
396 396
397 397 if widening:
398 398 newincludes = oldincludes | addedincludes
399 399 newexcludes = oldexcludes - removedexcludes
400 400 _widen(ui, repo, remote, commoninc, newincludes, newexcludes)
401 401
402 402 return 0
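The narrowspec updates in trackedcmd reduce to set arithmetic: narrowing drops includes and adds excludes, widening adds includes and drops excludes, and the widening pass starts from the already-narrowed values. A simplified single-pass illustration (it collapses the two-phase flow above into one helper with ad hoc names):

    def updatespec(oldinc, oldexc,
                   addinc=(), removeinc=(), addexc=(), removeexc=()):
        newinc = (set(oldinc) - set(removeinc)) | set(addinc)
        newexc = (set(oldexc) - set(removeexc)) | set(addexc)
        return newinc, newexc

    inc, exc = updatespec({'path:src'}, set(), addinc={'path:docs'})
    assert inc == {'path:src', 'path:docs'} and exc == set()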
@@ -1,110 +1,110 @@
1 1 # narrowrepo.py - repository which supports narrow revlogs, lazy loading
2 2 #
3 3 # Copyright 2017 Google, Inc.
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 from mercurial import (
11 11 bundlerepo,
12 12 localrepo,
13 13 match as matchmod,
14 14 scmutil,
15 15 )
16 16
17 17 from .. import (
18 18 share,
19 19 )
20 20
21 21 from . import (
22 22 narrowrevlog,
23 23 narrowspec,
24 24 )
25 25
26 requirement = 'narrowhg'
26 REQUIREMENT = 'narrowhg'
27 27
28 28 def wrappostshare(orig, sourcerepo, destrepo, **kwargs):
29 29 orig(sourcerepo, destrepo, **kwargs)
30 if requirement in sourcerepo.requirements:
30 if REQUIREMENT in sourcerepo.requirements:
31 31 with destrepo.wlock():
32 32 with destrepo.vfs('shared', 'a') as fp:
33 33 fp.write(narrowspec.FILENAME + '\n')
34 34
35 35 def unsharenarrowspec(orig, ui, repo, repopath):
36 if (requirement in repo.requirements
36 if (REQUIREMENT in repo.requirements
37 37 and repo.path == repopath and repo.shared()):
38 38 srcrepo = share._getsrcrepo(repo)
39 39 with srcrepo.vfs(narrowspec.FILENAME) as f:
40 40 spec = f.read()
41 41 with repo.vfs(narrowspec.FILENAME, 'w') as f:
42 42 f.write(spec)
43 43 return orig(ui, repo, repopath)
44 44
45 45 def wraprepo(repo, opts_narrow):
46 46 """Enables narrow clone functionality on a single local repository."""
47 47
48 48 cacheprop = localrepo.storecache
49 49 if isinstance(repo, bundlerepo.bundlerepository):
50 50 # We have to use a different caching property decorator for
51 51 # bundlerepo because storecache blows up in strange ways on a
52 52 # bundlerepo. Fortunately, there's no risk of data changing in
53 53 # a bundlerepo.
54 54 cacheprop = lambda name: localrepo.unfilteredpropertycache
55 55
56 56 class narrowrepository(repo.__class__):
57 57
58 58 def _constructmanifest(self):
59 59 manifest = super(narrowrepository, self)._constructmanifest()
60 60 narrowrevlog.makenarrowmanifestrevlog(manifest, repo)
61 61 return manifest
62 62
63 63 @cacheprop('00manifest.i')
64 64 def manifestlog(self):
65 65 mfl = super(narrowrepository, self).manifestlog
66 66 narrowrevlog.makenarrowmanifestlog(mfl, self)
67 67 return mfl
68 68
69 69 def file(self, f):
70 70 fl = super(narrowrepository, self).file(f)
71 71 narrowrevlog.makenarrowfilelog(fl, self.narrowmatch())
72 72 return fl
73 73
74 74 @localrepo.repofilecache(narrowspec.FILENAME)
75 75 def narrowpats(self):
76 76 return narrowspec.load(self)
77 77
78 78 @localrepo.repofilecache(narrowspec.FILENAME)
79 79 def _narrowmatch(self):
80 80 include, exclude = self.narrowpats
81 81 if not opts_narrow and not include and not exclude:
82 82 return matchmod.always(self.root, '')
83 83 return narrowspec.match(self.root, include=include, exclude=exclude)
84 84
85 85 # TODO(martinvonz): make this property-like instead?
86 86 def narrowmatch(self):
87 87 return self._narrowmatch
88 88
89 89 def setnarrowpats(self, newincludes, newexcludes):
90 90 narrowspec.save(self, newincludes, newexcludes)
91 91 self.invalidate(clearfilecache=True)
92 92
93 93 # I'm not sure this is the right place to do this filter.
94 94 # context._manifestmatches() would probably be better, or perhaps
95 95 # move it to a later place, in case some of the callers do want to know
96 96 # which directories changed. This seems to work for now, though.
97 97 def status(self, *args, **kwargs):
98 98 s = super(narrowrepository, self).status(*args, **kwargs)
99 99 narrowmatch = self.narrowmatch()
100 100 modified = filter(narrowmatch, s.modified)
101 101 added = filter(narrowmatch, s.added)
102 102 removed = filter(narrowmatch, s.removed)
103 103 deleted = filter(narrowmatch, s.deleted)
104 104 unknown = filter(narrowmatch, s.unknown)
105 105 ignored = filter(narrowmatch, s.ignored)
106 106 clean = filter(narrowmatch, s.clean)
107 107 return scmutil.status(modified, added, removed, deleted, unknown,
108 108 ignored, clean)
109 109
110 110 repo.__class__ = narrowrepository
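Finally, the status() override above simply filters every status bucket through the narrow matcher so paths outside the narrowspec never surface. A reduced version with a toy prefix matcher standing in for narrowmatch():

    def narrowmatch(path):
        return path.startswith('src/')  # toy matcher; the real one comes from narrowspec

    status = {'modified': ['src/a.py', 'docs/b.txt'], 'added': ['src/new.py']}
    narrowed = {k: [f for f in v if narrowmatch(f)] for k, v in status.items()}
    assert narrowed == {'modified': ['src/a.py'], 'added': ['src/new.py']}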