narrow: enforce that narrow spec is written within a transaction
marmoute
r51087:6794f927 default
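
With this change, narrowspec.save() and narrowspec.copytoworkingcopy() refuse to run without an open transaction: instead of writing the spec files directly, they raise error.ProgrammingError. A minimal sketch of the calling pattern this enforces is shown below; the widen() helper and the transaction name are illustrative only, not part of this changeset, and the pattern sets are assumed to be already normalized (e.g. via narrowspec.parsepatterns()).

from mercurial import narrowspec

def widen(repo, includepats, excludepats):
    # Hypothetical caller: the wlock/lock/transaction wrapper is what the
    # new ProgrammingError enforces.
    with repo.wlock(), repo.lock(), repo.transaction(b'narrow-widen'):
        # Inside a transaction, save() registers a file generator for
        # .hg/store/narrowspec instead of writing it immediately.
        narrowspec.save(repo, includepats, excludepats)
        narrowspec.copytoworkingcopy(repo)
    # Calling either function without a transaction now raises
    # error.ProgrammingError("changing narrow spec outside of a transaction").
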
@@ -1,384 +1,386 b''
# narrowspec.py - methods for working with a narrow view of a repository
#
# Copyright 2017 Google, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

import weakref

from .i18n import _
from .pycompat import getattr
from . import (
    error,
    match as matchmod,
    merge,
    mergestate as mergestatemod,
    scmutil,
    sparse,
    util,
)

# The file in .hg/store/ that indicates which paths exist in the store
FILENAME = b'narrowspec'
# The file in .hg/ that indicates which paths exist in the dirstate
DIRSTATE_FILENAME = b'narrowspec.dirstate'

# Pattern prefixes that are allowed in narrow patterns. This list MUST
# only contain patterns that are fast and safe to evaluate. Keep in mind
# that patterns are supplied by clients and executed on remote servers
# as part of wire protocol commands. That means that changes to this
# data structure influence the wire protocol and should not be taken
# lightly - especially removals.
VALID_PREFIXES = (
    b'path:',
    b'rootfilesin:',
)


def normalizesplitpattern(kind, pat):
    """Returns the normalized version of a pattern and kind.

    Returns a tuple with the normalized kind and normalized pattern.
    """
    pat = pat.rstrip(b'/')
    _validatepattern(pat)
    return kind, pat


def _numlines(s):
    """Returns the number of lines in s, including ending empty lines."""
    # We use splitlines because it is Unicode-friendly and thus Python 3
    # compatible. However, it does not count empty lines at the end, so trick
    # it by adding a character at the end.
    return len((s + b'x').splitlines())


def _validatepattern(pat):
    """Validates the pattern and aborts if it is invalid.

    Patterns are stored in the narrowspec as newline-separated
    POSIX-style bytestring paths. There's no escaping.
    """

    # We use newlines as separators in the narrowspec file, so don't allow them
    # in patterns.
    if _numlines(pat) > 1:
        raise error.Abort(_(b'newlines are not allowed in narrowspec paths'))

    components = pat.split(b'/')
    if b'.' in components or b'..' in components:
        raise error.Abort(
            _(b'"." and ".." are not allowed in narrowspec paths')
        )


def normalizepattern(pattern, defaultkind=b'path'):
    """Returns the normalized version of a text-format pattern.

    If the pattern has no kind, the default will be added.
    """
    kind, pat = matchmod._patsplit(pattern, defaultkind)
    return b'%s:%s' % normalizesplitpattern(kind, pat)


def parsepatterns(pats):
    """Parses an iterable of patterns into a typed pattern set.

    Patterns are assumed to be ``path:`` if no prefix is present.
    For safety and performance reasons, only some prefixes are allowed.
    See ``validatepatterns()``.

    This function should be used on patterns that come from the user to
    normalize and validate them to the internal data structure used for
    representing patterns.
    """
    res = {normalizepattern(orig) for orig in pats}
    validatepatterns(res)
    return res


def validatepatterns(pats):
    """Validate that patterns are in the expected data structure and format.

    And that is a set of normalized patterns beginning with ``path:`` or
    ``rootfilesin:``.

    This function should be used to validate internal data structures
    and patterns that are loaded from sources that use the internal,
    prefixed pattern representation (but can't necessarily be fully trusted).
    """
    with util.timedcm('narrowspec.validatepatterns(pats size=%d)', len(pats)):
        if not isinstance(pats, set):
            raise error.ProgrammingError(
                b'narrow patterns should be a set; got %r' % pats
            )

        for pat in pats:
            if not pat.startswith(VALID_PREFIXES):
                # Use a Mercurial exception because this can happen due to user
                # bugs (e.g. manually updating spec file).
                raise error.Abort(
                    _(b'invalid prefix on narrow pattern: %s') % pat,
                    hint=_(
                        b'narrow patterns must begin with one of '
                        b'the following: %s'
                    )
                    % b', '.join(VALID_PREFIXES),
                )


def format(includes, excludes):
    output = b'[include]\n'
    for i in sorted(includes - excludes):
        output += i + b'\n'
    output += b'[exclude]\n'
    for e in sorted(excludes):
        output += e + b'\n'
    return output


def match(root, include=None, exclude=None):
    if not include:
        # Passing empty include and empty exclude to matchmod.match()
        # gives a matcher that matches everything, so explicitly use
        # the nevermatcher.
        return matchmod.never()
    return matchmod.match(
        root, b'', [], include=include or [], exclude=exclude or []
    )


def parseconfig(ui, spec):
    # maybe we should care about the profiles returned too
    includepats, excludepats, profiles = sparse.parseconfig(ui, spec, b'narrow')
    if profiles:
        raise error.Abort(
            _(
                b"including other spec files using '%include' is not"
                b" supported in narrowspec"
            )
        )

    validatepatterns(includepats)
    validatepatterns(excludepats)

    return includepats, excludepats


def load(repo):
    # Treat "narrowspec does not exist" the same as "narrowspec file exists
    # and is empty".
    spec = repo.svfs.tryread(FILENAME)
    return parseconfig(repo.ui, spec)


def save(repo, includepats, excludepats):
    repo = repo.unfiltered()

    validatepatterns(includepats)
    validatepatterns(excludepats)
    spec = format(includepats, excludepats)

    tr = repo.currenttransaction()
    if tr is None:
-        repo.svfs.write(FILENAME, spec)
+        m = "changing narrow spec outside of a transaction"
+        raise error.ProgrammingError(m)
    else:
        # the roundtrip is sometimes different
        # not taking any chance for now
        value = parseconfig(repo.ui, spec)
        reporef = weakref.ref(repo)

        def clean_pending(tr):
            r = reporef()
            if r is not None:
                r._pending_narrow_pats = None

        tr.addpostclose(b'narrow-spec', clean_pending)
        tr.addabort(b'narrow-spec', clean_pending)
        repo._pending_narrow_pats = value

        def write_spec(f):
            f.write(spec)

        tr.addfilegenerator(
            # XXX think about order at some point
            b"narrow-spec",
            (FILENAME,),
            write_spec,
            location=b'store',
        )


def copytoworkingcopy(repo):
    repo = repo.unfiltered()
    tr = repo.currenttransaction()
    spec = format(*repo.narrowpats)
    if tr is None:
-        repo.vfs.write(DIRSTATE_FILENAME, spec)
+        m = "changing narrow spec outside of a transaction"
+        raise error.ProgrammingError(m)
    else:

        reporef = weakref.ref(repo)

        def clean_pending(tr):
            r = reporef()
            if r is not None:
                r._pending_narrow_pats_dirstate = None

        tr.addpostclose(b'narrow-spec-dirstate', clean_pending)
        tr.addabort(b'narrow-spec-dirstate', clean_pending)
        repo._pending_narrow_pats_dirstate = repo.narrowpats

        def write_spec(f):
            f.write(spec)

        tr.addfilegenerator(
            # XXX think about order at some point
            b"narrow-spec-dirstate",
            (DIRSTATE_FILENAME,),
            write_spec,
            location=b'plain',
        )


def restrictpatterns(req_includes, req_excludes, repo_includes, repo_excludes):
    r"""Restricts the patterns according to repo settings,
    results in a logical AND operation

    :param req_includes: requested includes
    :param req_excludes: requested excludes
    :param repo_includes: repo includes
    :param repo_excludes: repo excludes
    :return: include patterns, exclude patterns, and invalid include patterns.
    """
    res_excludes = set(req_excludes)
    res_excludes.update(repo_excludes)
    invalid_includes = []
    if not req_includes:
        res_includes = set(repo_includes)
    elif b'path:.' not in repo_includes:
        res_includes = []
        for req_include in req_includes:
            req_include = util.expandpath(util.normpath(req_include))
            if req_include in repo_includes:
                res_includes.append(req_include)
                continue
            valid = False
            for repo_include in repo_includes:
                if req_include.startswith(repo_include + b'/'):
                    valid = True
                    res_includes.append(req_include)
                    break
            if not valid:
                invalid_includes.append(req_include)
        if len(res_includes) == 0:
            res_excludes = {b'path:.'}
        else:
            res_includes = set(res_includes)
    else:
        res_includes = set(req_includes)
    return res_includes, res_excludes, invalid_includes


# These two are extracted for extensions (specifically for Google's CitC file
# system)
def _deletecleanfiles(repo, files):
    for f in files:
        repo.wvfs.unlinkpath(f)


def _writeaddedfiles(repo, pctx, files):
    mresult = merge.mergeresult()
    mf = repo[b'.'].manifest()
    for f in files:
        if not repo.wvfs.exists(f):
            mresult.addfile(
                f,
                mergestatemod.ACTION_GET,
                (mf.flags(f), False),
                b"narrowspec updated",
            )
    merge.applyupdates(
        repo,
        mresult,
        wctx=repo[None],
        mctx=repo[b'.'],
        overwrite=False,
        wantfiledata=False,
    )


def checkworkingcopynarrowspec(repo):
    # Avoid infinite recursion when updating the working copy
    if getattr(repo, '_updatingnarrowspec', False):
        return
    storespec = repo.narrowpats
    wcspec = repo._pending_narrow_pats_dirstate
    if wcspec is None:
        oldspec = repo.vfs.tryread(DIRSTATE_FILENAME)
        wcspec = parseconfig(repo.ui, oldspec)
    if wcspec != storespec:
        raise error.StateError(
            _(b"working copy's narrowspec is stale"),
            hint=_(b"run 'hg tracked --update-working-copy'"),
        )


def updateworkingcopy(repo, assumeclean=False):
    """updates the working copy and dirstate from the store narrowspec

    When assumeclean=True, files that are not known to be clean will also
    be deleted. It is then up to the caller to make sure they are clean.
    """
    old = repo._pending_narrow_pats_dirstate
    if old is None:
        oldspec = repo.vfs.tryread(DIRSTATE_FILENAME)
        oldincludes, oldexcludes = parseconfig(repo.ui, oldspec)
    else:
        oldincludes, oldexcludes = old
    newincludes, newexcludes = repo.narrowpats
    repo._updatingnarrowspec = True

    oldmatch = match(repo.root, include=oldincludes, exclude=oldexcludes)
    newmatch = match(repo.root, include=newincludes, exclude=newexcludes)
    addedmatch = matchmod.differencematcher(newmatch, oldmatch)
    removedmatch = matchmod.differencematcher(oldmatch, newmatch)

    assert repo.currentwlock() is not None
    ds = repo.dirstate
    with ds.running_status(repo):
        lookup, status, _mtime_boundary = ds.status(
            removedmatch,
            subrepos=[],
            ignored=True,
            clean=True,
            unknown=True,
        )
    trackeddirty = status.modified + status.added
    clean = status.clean
    if assumeclean:
        clean.extend(lookup)
    else:
        trackeddirty.extend(lookup)
    _deletecleanfiles(repo, clean)
    uipathfn = scmutil.getuipathfn(repo)
    for f in sorted(trackeddirty):
        repo.ui.status(
            _(b'not deleting possibly dirty file %s\n') % uipathfn(f)
        )
    for f in sorted(status.unknown):
        repo.ui.status(_(b'not deleting unknown file %s\n') % uipathfn(f))
    for f in sorted(status.ignored):
        repo.ui.status(_(b'not deleting ignored file %s\n') % uipathfn(f))
    for f in clean + trackeddirty:
        ds.update_file(f, p1_tracked=False, wc_tracked=False)

    pctx = repo[b'.']

    # only update added files that are in the sparse checkout
    addedmatch = matchmod.intersectmatchers(addedmatch, sparse.matcher(repo))
    newfiles = [f for f in pctx.manifest().walk(addedmatch) if f not in ds]
    for f in newfiles:
        ds.update_file(f, p1_tracked=True, wc_tracked=True, possibly_dirty=True)
    _writeaddedfiles(repo, pctx, newfiles)
    repo._updatingnarrowspec = False
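
For reference, the text that the transaction file generators write is produced by format() above: an [include] section listing the sorted include patterns (minus any that are also excluded), followed by an [exclude] section. A small illustration with hypothetical pattern values:

# Illustration only; the pattern values are made up.
includes = {b'path:docs', b'path:src'}
excludes = {b'path:src/vendor'}

# format(includes, excludes) returns:
#   [include]
#   path:docs
#   path:src
#   [exclude]
#   path:src/vendor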