##// END OF EJS Templates
narrow: widen when files are excluded by sparse and not included by narrow...
Charles Chamberlain -
r48084:e4ccc341 default
parent child Browse files
Show More
@@ -1,353 +1,356 b''
1 # narrowspec.py - methods for working with a narrow view of a repository
1 # narrowspec.py - methods for working with a narrow view of a repository
2 #
2 #
3 # Copyright 2017 Google, Inc.
3 # Copyright 2017 Google, Inc.
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 from .i18n import _
10 from .i18n import _
11 from .pycompat import getattr
11 from .pycompat import getattr
12 from . import (
12 from . import (
13 error,
13 error,
14 match as matchmod,
14 match as matchmod,
15 merge,
15 merge,
16 mergestate as mergestatemod,
16 mergestate as mergestatemod,
17 requirements,
17 requirements,
18 scmutil,
18 scmutil,
19 sparse,
19 sparse,
20 util,
20 util,
21 )
21 )
22
22
# The file in .hg/store/ that indicates which paths exist in the store.
FILENAME = b'narrowspec'
# The file in .hg/ that indicates which paths exist in the dirstate
# (i.e. the working copy's view of the narrowspec).
DIRSTATE_FILENAME = b'narrowspec.dirstate'

# Pattern prefixes that are allowed in narrow patterns. This list MUST
# only contain patterns that are fast and safe to evaluate. Keep in mind
# that patterns are supplied by clients and executed on remote servers
# as part of wire protocol commands. That means that changes to this
# data structure influence the wire protocol and should not be taken
# lightly - especially removals.
VALID_PREFIXES = (
    b'path:',
    b'rootfilesin:',
)
38
38
39
39
def normalizesplitpattern(kind, pat):
    """Return the normalized version of a pattern and kind.

    Trailing slashes are stripped from the pattern, which is then
    validated; the kind is passed through unchanged.

    Returns a tuple with the normalized kind and normalized pattern.
    """
    stripped = pat.rstrip(b'/')
    _validatepattern(stripped)
    return kind, stripped
48
48
49
49
50 def _numlines(s):
50 def _numlines(s):
51 """Returns the number of lines in s, including ending empty lines."""
51 """Returns the number of lines in s, including ending empty lines."""
52 # We use splitlines because it is Unicode-friendly and thus Python 3
52 # We use splitlines because it is Unicode-friendly and thus Python 3
53 # compatible. However, it does not count empty lines at the end, so trick
53 # compatible. However, it does not count empty lines at the end, so trick
54 # it by adding a character at the end.
54 # it by adding a character at the end.
55 return len((s + b'x').splitlines())
55 return len((s + b'x').splitlines())
56
56
57
57
def _validatepattern(pat):
    """Validate the pattern and abort if it is invalid.

    Patterns are stored in the narrowspec as newline-separated
    POSIX-style bytestring paths. There's no escaping.
    """

    # Newlines act as separators in the narrowspec file, so a pattern
    # containing one would corrupt the file.
    if _numlines(pat) > 1:
        raise error.Abort(_(b'newlines are not allowed in narrowspec paths'))

    # Reject relative path components outright.
    if {b'.', b'..'} & set(pat.split(b'/')):
        raise error.Abort(
            _(b'"." and ".." are not allowed in narrowspec paths')
        )
75
75
76
76
def normalizepattern(pattern, defaultkind=b'path'):
    """Return the normalized version of a text-format pattern.

    If the pattern has no kind prefix, ``defaultkind`` is used.
    """
    kind, pat = matchmod._patsplit(pattern, defaultkind)
    normkind, normpat = normalizesplitpattern(kind, pat)
    return b'%s:%s' % (normkind, normpat)
84
84
85
85
def parsepatterns(pats):
    """Parse an iterable of patterns into a typed pattern set.

    Patterns are assumed to be ``path:`` if no prefix is present.
    For safety and performance reasons, only some prefixes are allowed.
    See ``validatepatterns()``.

    This function should be used on patterns that come from the user to
    normalize and validate them to the internal data structure used for
    representing patterns.
    """
    normalized = {normalizepattern(p) for p in pats}
    validatepatterns(normalized)
    return normalized
100
100
101
101
def validatepatterns(pats):
    """Validate that patterns are in the expected data structure and format.

    And that is a set of normalized patterns beginning with ``path:`` or
    ``rootfilesin:``.

    This function should be used to validate internal data structures
    and patterns that are loaded from sources that use the internal,
    prefixed pattern representation (but can't necessarily be fully trusted).
    """
    if not isinstance(pats, set):
        raise error.ProgrammingError(
            b'narrow patterns should be a set; got %r' % pats
        )

    for pat in pats:
        if pat.startswith(VALID_PREFIXES):
            continue
        # Use a Mercurial exception because this can happen due to user
        # bugs (e.g. manually updating spec file).
        raise error.Abort(
            _(b'invalid prefix on narrow pattern: %s') % pat,
            hint=_(
                b'narrow patterns must begin with one of '
                b'the following: %s'
            )
            % b', '.join(VALID_PREFIXES),
        )
129
129
130
130
def format(includes, excludes):
    """Serialize include/exclude pattern sets into narrowspec file text.

    Patterns that appear in both sets are emitted only under
    ``[exclude]``, since exclusion wins.
    """
    lines = [b'[include]']
    lines.extend(sorted(includes - excludes))
    lines.append(b'[exclude]')
    lines.extend(sorted(excludes))
    return b'\n'.join(lines) + b'\n'
139
139
140
140
def match(root, include=None, exclude=None):
    """Build a matcher for the given narrowspec include/exclude patterns."""
    if not include:
        # Passing empty include and empty exclude to matchmod.match()
        # gives a matcher that matches everything, so explicitly use
        # the nevermatcher.
        return matchmod.never()
    return matchmod.match(
        root, b'', [], include=include or [], exclude=exclude or []
    )
150
150
151
151
def parseconfig(ui, spec):
    """Parse narrowspec text into validated (includes, excludes) sets.

    Aborts if the spec uses ``%include`` profiles, which narrowspec does
    not support.
    """
    # maybe we should care about the profiles returned too
    includes, excludes, profiles = sparse.parseconfig(ui, spec, b'narrow')
    if profiles:
        raise error.Abort(
            _(
                b"including other spec files using '%include' is not"
                b" supported in narrowspec"
            )
        )

    validatepatterns(includes)
    validatepatterns(excludes)

    return includes, excludes
167
167
168
168
def load(repo):
    """Read and parse the store narrowspec for ``repo``."""
    # Treat "narrowspec does not exist" the same as "narrowspec file exists
    # and is empty".
    spec = repo.svfs.tryread(FILENAME)
    return parseconfig(repo.ui, spec)
174
174
175
175
def save(repo, includepats, excludepats):
    """Validate the given pattern sets and write them to the store narrowspec."""
    validatepatterns(includepats)
    validatepatterns(excludepats)
    repo.svfs.write(FILENAME, format(includepats, excludepats))
181
181
182
182
def copytoworkingcopy(repo):
    """Copy the store narrowspec into the working copy's dirstate copy."""
    repo.vfs.write(DIRSTATE_FILENAME, repo.svfs.read(FILENAME))
186
186
187
187
def savebackup(repo, backupname):
    """Hard-link a backup of the store narrowspec; no-op for non-narrow repos."""
    if requirements.NARROW_REQUIREMENT not in repo.requirements:
        return
    svfs = repo.svfs
    # Remove any stale backup before creating the new one.
    svfs.tryunlink(backupname)
    util.copyfile(svfs.join(FILENAME), svfs.join(backupname), hardlink=True)
194
194
195
195
def restorebackup(repo, backupname):
    """Restore the store narrowspec from a backup; no-op for non-narrow repos."""
    if requirements.NARROW_REQUIREMENT not in repo.requirements:
        return
    svfs = repo.svfs
    util.rename(svfs.join(backupname), svfs.join(FILENAME))
200
200
201
201
def savewcbackup(repo, backupname):
    """Hard-link a backup of the working copy's narrowspec copy.

    No-op for non-narrow repos.
    """
    if requirements.NARROW_REQUIREMENT not in repo.requirements:
        return
    vfs = repo.vfs
    vfs.tryunlink(backupname)
    # It may not exist in old repos
    if vfs.exists(DIRSTATE_FILENAME):
        util.copyfile(
            vfs.join(DIRSTATE_FILENAME), vfs.join(backupname), hardlink=True
        )
212
212
213
213
def restorewcbackup(repo, backupname):
    """Restore the working copy's narrowspec copy from a backup.

    No-op for non-narrow repos.
    """
    if requirements.NARROW_REQUIREMENT not in repo.requirements:
        return
    # It may not exist in old repos
    if repo.vfs.exists(backupname):
        util.rename(repo.vfs.join(backupname), repo.vfs.join(DIRSTATE_FILENAME))
220
220
221
221
def clearwcbackup(repo, backupname):
    """Delete a working-copy narrowspec backup; no-op for non-narrow repos."""
    if requirements.NARROW_REQUIREMENT not in repo.requirements:
        return
    repo.vfs.tryunlink(backupname)
226
226
227
227
def restrictpatterns(req_includes, req_excludes, repo_includes, repo_excludes):
    r"""Restricts the patterns according to repo settings,
    results in a logical AND operation

    :param req_includes: requested includes
    :param req_excludes: requested excludes
    :param repo_includes: repo includes
    :param repo_excludes: repo excludes
    :return: include patterns, exclude patterns, and invalid include patterns.
    """
    # Excludes combine: anything excluded by either the request or the
    # repo stays excluded.
    res_excludes = set(req_excludes)
    res_excludes.update(repo_excludes)
    invalid_includes = []
    if not req_includes:
        # Nothing requested: fall back to the repo's own includes.
        res_includes = set(repo_includes)
    elif b'path:.' not in repo_includes:
        # The repo does not include everything, so each requested include
        # must be contained in (equal to, or a subpath of) some repo include.
        res_includes = []
        for req_include in req_includes:
            req_include = util.expandpath(util.normpath(req_include))
            if req_include in repo_includes:
                res_includes.append(req_include)
                continue
            valid = False
            for repo_include in repo_includes:
                # Subpath containment: the b'/' suffix prevents treating
                # b'foo-bar' as contained in b'foo'.
                if req_include.startswith(repo_include + b'/'):
                    valid = True
                    res_includes.append(req_include)
                    break
            if not valid:
                invalid_includes.append(req_include)
        if len(res_includes) == 0:
            # No valid include survived: exclude everything instead of
            # returning an empty include set.
            res_excludes = {b'path:.'}
        else:
            res_includes = set(res_includes)
    else:
        # Repo includes everything, so the request passes through as-is.
        res_includes = set(req_includes)
    return res_includes, res_excludes, invalid_includes
265
265
266
266
267 # These two are extracted for extensions (specifically for Google's CitC file
267 # These two are extracted for extensions (specifically for Google's CitC file
268 # system)
268 # system)
def _deletecleanfiles(repo, files):
    """Remove each of ``files`` from the working copy via the repo's wvfs."""
    for path in files:
        repo.wvfs.unlinkpath(path)
272
272
273
273
def _writeaddedfiles(repo, pctx, files):
    """Write out ``files`` from the '.' manifest into the working copy.

    Files already present on disk are left untouched.
    NOTE(review): pctx appears unused here; kept for API compatibility
    (this hook is extracted for extensions) — confirm before removing.
    """
    mresult = merge.mergeresult()
    manifest = repo[b'.'].manifest()
    for path in files:
        if repo.wvfs.exists(path):
            continue
        mresult.addfile(
            path,
            mergestatemod.ACTION_GET,
            (manifest.flags(path), False),
            b"narrowspec updated",
        )
    merge.applyupdates(
        repo,
        mresult,
        wctx=repo[None],
        mctx=repo[b'.'],
        overwrite=False,
        wantfiledata=False,
    )
293
293
294
294
def checkworkingcopynarrowspec(repo):
    """Abort if the working copy's narrowspec copy differs from the store's."""
    # Avoid infinite recursion when updating the working copy
    if getattr(repo, '_updatingnarrowspec', False):
        return
    storespec = repo.svfs.tryread(FILENAME)
    wcspec = repo.vfs.tryread(DIRSTATE_FILENAME)
    if wcspec == storespec:
        return
    raise error.Abort(
        _(b"working copy's narrowspec is stale"),
        hint=_(b"run 'hg tracked --update-working-copy'"),
    )
306
306
307
307
def updateworkingcopy(repo, assumeclean=False):
    """updates the working copy and dirstate from the store narrowspec

    When assumeclean=True, files that are not known to be clean will also
    be deleted. It is then up to the caller to make sure they are clean.
    """
    oldspec = repo.vfs.tryread(DIRSTATE_FILENAME)
    newspec = repo.svfs.tryread(FILENAME)
    # Flag read by checkworkingcopynarrowspec() to avoid infinite recursion.
    repo._updatingnarrowspec = True

    oldincludes, oldexcludes = parseconfig(repo.ui, oldspec)
    newincludes, newexcludes = parseconfig(repo.ui, newspec)
    oldmatch = match(repo.root, include=oldincludes, exclude=oldexcludes)
    newmatch = match(repo.root, include=newincludes, exclude=newexcludes)
    # Files newly covered by the narrowspec, and files no longer covered.
    addedmatch = matchmod.differencematcher(newmatch, oldmatch)
    removedmatch = matchmod.differencematcher(oldmatch, newmatch)

    ds = repo.dirstate
    lookup, status = ds.status(
        removedmatch, subrepos=[], ignored=True, clean=True, unknown=True
    )
    trackeddirty = status.modified + status.added
    clean = status.clean
    # Files in 'lookup' could not be classified cheaply; treat them as
    # clean only when the caller vouches for them.
    if assumeclean:
        clean.extend(lookup)
    else:
        trackeddirty.extend(lookup)
    _deletecleanfiles(repo, clean)
    uipathfn = scmutil.getuipathfn(repo)
    # Dirty/unknown/ignored files are preserved on disk; only warn.
    for f in sorted(trackeddirty):
        repo.ui.status(
            _(b'not deleting possibly dirty file %s\n') % uipathfn(f)
        )
    for f in sorted(status.unknown):
        repo.ui.status(_(b'not deleting unknown file %s\n') % uipathfn(f))
    for f in sorted(status.ignored):
        repo.ui.status(_(b'not deleting ignored file %s\n') % uipathfn(f))
    # Stop tracking everything that left the narrowspec, deleted or not.
    for f in clean + trackeddirty:
        ds.drop(f)

    pctx = repo[b'.']

    # only update added files that are in the sparse checkout
    addedmatch = matchmod.intersectmatchers(addedmatch, sparse.matcher(repo))
    newfiles = [f for f in pctx.manifest().walk(addedmatch) if f not in ds]
    for f in newfiles:
        ds.normallookup(f)
    _writeaddedfiles(repo, pctx, newfiles)
    repo._updatingnarrowspec = False
@@ -1,72 +1,97 b''
1 Testing interaction of sparse and narrow when both are enabled on the client
1 Testing interaction of sparse and narrow when both are enabled on the client
2 side and we do a non-ellipsis clone
2 side and we do a non-ellipsis clone
3
3
4 #testcases tree flat
4 #testcases tree flat
5 $ . "$TESTDIR/narrow-library.sh"
5 $ . "$TESTDIR/narrow-library.sh"
6 $ cat << EOF >> $HGRCPATH
6 $ cat << EOF >> $HGRCPATH
7 > [extensions]
7 > [extensions]
8 > sparse =
8 > sparse =
9 > EOF
9 > EOF
10
10
11 #if tree
11 #if tree
12 $ cat << EOF >> $HGRCPATH
12 $ cat << EOF >> $HGRCPATH
13 > [experimental]
13 > [experimental]
14 > treemanifest = 1
14 > treemanifest = 1
15 > EOF
15 > EOF
16 #endif
16 #endif
17
17
18 $ hg init master
18 $ hg init master
19 $ cd master
19 $ cd master
20
20
21 $ mkdir inside
21 $ mkdir inside
22 $ echo 'inside' > inside/f
22 $ echo 'inside' > inside/f
23 $ hg add inside/f
23 $ hg add inside/f
24 $ hg commit -m 'add inside'
24 $ hg commit -m 'add inside'
25
25
26 $ mkdir widest
26 $ mkdir widest
27 $ echo 'widest' > widest/f
27 $ echo 'widest' > widest/f
28 $ hg add widest/f
28 $ hg add widest/f
29 $ hg commit -m 'add widest'
29 $ hg commit -m 'add widest'
30
30
31 $ mkdir outside
31 $ mkdir outside
32 $ echo 'outside' > outside/f
32 $ echo 'outside' > outside/f
33 $ hg add outside/f
33 $ hg add outside/f
34 $ hg commit -m 'add outside'
34 $ hg commit -m 'add outside'
35
35
36 $ cd ..
36 $ cd ..
37
37
38 narrow clone the inside file
38 narrow clone the inside file
39
39
40 $ hg clone --narrow ssh://user@dummy/master narrow --include inside/f
40 $ hg clone --narrow ssh://user@dummy/master narrow --include inside/f
41 requesting all changes
41 requesting all changes
42 adding changesets
42 adding changesets
43 adding manifests
43 adding manifests
44 adding file changes
44 adding file changes
45 added 3 changesets with 1 changes to 1 files
45 added 3 changesets with 1 changes to 1 files
46 new changesets *:* (glob)
46 new changesets *:* (glob)
47 updating to branch default
47 updating to branch default
48 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
48 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
49 $ cd narrow
49 $ cd narrow
50 $ hg tracked
50 $ hg tracked
51 I path:inside/f
51 I path:inside/f
52 $ hg files
52 $ hg files
53 inside/f
53 inside/f
54
54
55 XXX: we should have a flag in `hg debugsparse` to list the sparse profile
55 XXX: we should have a flag in `hg debugsparse` to list the sparse profile
56 $ test -f .hg/sparse
56 $ test -f .hg/sparse
57 [1]
57 [1]
58
58
59 $ cat .hg/requires
59 $ cat .hg/requires
60 dotencode
60 dotencode
61 exp-dirstate-v2 (dirstate-v2 !)
61 exp-dirstate-v2 (dirstate-v2 !)
62 fncache
62 fncache
63 generaldelta
63 generaldelta
64 narrowhg-experimental
64 narrowhg-experimental
65 persistent-nodemap (rust !)
65 persistent-nodemap (rust !)
66 revlog-compression-zstd (zstd !)
66 revlog-compression-zstd (zstd !)
67 revlogv1
67 revlogv1
68 sparserevlog
68 sparserevlog
69 store
69 store
70 treemanifest (tree !)
70 treemanifest (tree !)
71
71
72 $ hg debugrebuilddirstate
72 $ hg debugrebuilddirstate
73
74 We only make the following assertions for the flat test case since in the
75 treemanifest test case debugsparse fails with "path ends in directory
76 separator: outside/" which seems like a bug unrelated to the regression this is
77 testing for.
78
79 #if flat
80 widening with both sparse and narrow is possible
81
82 $ cat >> .hg/hgrc <<EOF
83 > [extensions]
84 > sparse =
85 > narrow =
86 > EOF
87
88 $ hg debugsparse -X outside/f -X widest/f
89 $ hg tracked -q --addinclude outside/f
90 $ find . -name .hg -prune -o -type f -print | sort
91 ./inside/f
92
93 $ hg debugsparse -d outside/f
94 $ find . -name .hg -prune -o -type f -print | sort
95 ./inside/f
96 ./outside/f
97 #endif
General Comments 0
You need to be logged in to leave comments. Login now