##// END OF EJS Templates
tests: remove doctest in narrowspec, it is broken...
Kyle Lippincott -
r45053:1922694d default
parent child Browse files
Show More
@@ -1,364 +1,349 b''
1 # narrowspec.py - methods for working with a narrow view of a repository
1 # narrowspec.py - methods for working with a narrow view of a repository
2 #
2 #
3 # Copyright 2017 Google, Inc.
3 # Copyright 2017 Google, Inc.
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 from .i18n import _
10 from .i18n import _
11 from .pycompat import getattr
11 from .pycompat import getattr
12 from .interfaces import repository
12 from .interfaces import repository
13 from . import (
13 from . import (
14 error,
14 error,
15 match as matchmod,
15 match as matchmod,
16 merge,
16 merge,
17 scmutil,
17 scmutil,
18 sparse,
18 sparse,
19 util,
19 util,
20 )
20 )
21
21
# The file in .hg/store/ that indicates which paths exist in the store
FILENAME = b'narrowspec'
# The file in .hg/ that indicates which paths exist in the dirstate
DIRSTATE_FILENAME = b'narrowspec.dirstate'

# Pattern prefixes that are allowed in narrow patterns. This list MUST
# only contain patterns that are fast and safe to evaluate. Keep in mind
# that patterns are supplied by clients and executed on remote servers
# as part of wire protocol commands. That means that changes to this
# data structure influence the wire protocol and should not be taken
# lightly - especially removals.
VALID_PREFIXES = (
    b'path:',
    b'rootfilesin:',
)
37
37
38
38
def normalizesplitpattern(kind, pat):
    """Returns the normalized version of a pattern and kind.

    Returns a tuple with the normalized kind and normalized pattern.
    """
    # Trailing slashes are never significant for narrow patterns.
    stripped = pat.rstrip(b'/')
    _validatepattern(stripped)
    return kind, stripped
47
47
48
48
49 def _numlines(s):
49 def _numlines(s):
50 """Returns the number of lines in s, including ending empty lines."""
50 """Returns the number of lines in s, including ending empty lines."""
51 # We use splitlines because it is Unicode-friendly and thus Python 3
51 # We use splitlines because it is Unicode-friendly and thus Python 3
52 # compatible. However, it does not count empty lines at the end, so trick
52 # compatible. However, it does not count empty lines at the end, so trick
53 # it by adding a character at the end.
53 # it by adding a character at the end.
54 return len((s + b'x').splitlines())
54 return len((s + b'x').splitlines())
55
55
56
56
def _validatepattern(pat):
    """Validates the pattern and aborts if it is invalid.

    Patterns are stored in the narrowspec as newline-separated
    POSIX-style bytestring paths. There's no escaping.
    """
    # Newlines separate entries in the narrowspec file, so a pattern
    # containing one would corrupt the file format.
    if _numlines(pat) > 1:
        raise error.Abort(_(b'newlines are not allowed in narrowspec paths'))

    # Reject relative-path components such as "a/../b" or "./a".
    parts = pat.split(b'/')
    if b'.' in parts or b'..' in parts:
        raise error.Abort(
            _(b'"." and ".." are not allowed in narrowspec paths')
        )
74
74
75
75
def normalizepattern(pattern, defaultkind=b'path'):
    """Returns the normalized version of a text-format pattern.

    If the pattern has no kind, the default will be added.
    """
    # Split off the kind prefix (falling back to defaultkind), normalize
    # both halves, then reassemble the canonical b'kind:pat' text form.
    split = matchmod._patsplit(pattern, defaultkind)
    return b'%s:%s' % normalizesplitpattern(*split)
83
83
84
84
def parsepatterns(pats):
    """Parses an iterable of patterns into a typed pattern set.

    Patterns are assumed to be ``path:`` if no prefix is present.
    For safety and performance reasons, only some prefixes are allowed.
    See ``validatepatterns()``.

    This function should be used on patterns that come from the user to
    normalize and validate them to the internal data structure used for
    representing patterns.
    """
    normalized = set()
    for orig in pats:
        normalized.add(normalizepattern(orig))
    validatepatterns(normalized)
    return normalized
99
99
100
100
def validatepatterns(pats):
    """Validate that patterns are in the expected data structure and format.

    And that is a set of normalized patterns beginning with ``path:`` or
    ``rootfilesin:``.

    This function should be used to validate internal data structures
    and patterns that are loaded from sources that use the internal,
    prefixed pattern representation (but can't necessarily be fully trusted).
    """
    if not isinstance(pats, set):
        raise error.ProgrammingError(
            b'narrow patterns should be a set; got %r' % pats
        )

    for pat in pats:
        if pat.startswith(VALID_PREFIXES):
            continue
        # Use a Mercurial exception because this can happen due to user
        # bugs (e.g. manually updating spec file).
        raise error.Abort(
            _(b'invalid prefix on narrow pattern: %s') % pat,
            hint=_(
                b'narrow patterns must begin with one of '
                b'the following: %s'
            )
            % b', '.join(VALID_PREFIXES),
        )
128
128
129
129
def format(includes, excludes):
    """Serialize include/exclude pattern sets into narrowspec file text.

    Emits an ``[include]`` section (patterns in ``includes`` minus any
    that are also excluded) followed by an ``[exclude]`` section, each
    pattern on its own line, sorted for stable output.
    """
    # Build the lines and join once rather than repeated bytes
    # concatenation, which is quadratic in the number of patterns.
    lines = [b'[include]']
    lines.extend(sorted(includes - excludes))
    lines.append(b'[exclude]')
    lines.extend(sorted(excludes))
    return b'\n'.join(lines) + b'\n'
138
138
139
139
def match(root, include=None, exclude=None):
    """Return a matcher for the given narrow include/exclude patterns."""
    if include:
        return matchmod.match(
            root, b'', [], include=include or [], exclude=exclude or []
        )
    # Passing empty include and empty exclude to matchmod.match()
    # gives a matcher that matches everything, so explicitly use
    # the nevermatcher.
    return matchmod.never()
149
149
150
150
def parseconfig(ui, spec):
    """Parse narrowspec text into validated (includes, excludes) sets.

    Aborts if the spec uses '%include' profiles, which narrowspec does
    not support.
    """
    # maybe we should care about the profiles returned too
    includes, excludes, profiles = sparse.parseconfig(ui, spec, b'narrow')
    if profiles:
        raise error.Abort(
            _(
                b"including other spec files using '%include' is not"
                b" supported in narrowspec"
            )
        )

    validatepatterns(includes)
    validatepatterns(excludes)

    return includes, excludes
166
166
167
167
def load(repo):
    """Read and parse the store narrowspec.

    Treat "narrowspec does not exist" the same as "narrowspec file
    exists and is empty".
    """
    return parseconfig(repo.ui, repo.svfs.tryread(FILENAME))
173
173
174
174
def save(repo, includepats, excludepats):
    """Validate and write the given narrow patterns to the store."""
    validatepatterns(includepats)
    validatepatterns(excludepats)
    repo.svfs.write(FILENAME, format(includepats, excludepats))
180
180
181
181
def copytoworkingcopy(repo):
    """Mirror the store narrowspec into the working copy's private copy."""
    repo.vfs.write(DIRSTATE_FILENAME, repo.svfs.read(FILENAME))
185
185
186
186
def savebackup(repo, backupname):
    """Hard-link a backup of the store narrowspec under ``backupname``.

    No-op for repositories without the narrow requirement.
    """
    if repository.NARROW_REQUIREMENT not in repo.requirements:
        return
    repo.svfs.tryunlink(backupname)
    util.copyfile(
        repo.svfs.join(FILENAME), repo.svfs.join(backupname), hardlink=True
    )
193
193
194
194
def restorebackup(repo, backupname):
    """Move the backup made by savebackup() back over the store narrowspec."""
    if repository.NARROW_REQUIREMENT not in repo.requirements:
        return
    svfs = repo.svfs
    util.rename(svfs.join(backupname), svfs.join(FILENAME))
199
199
200
200
def savewcbackup(repo, backupname):
    """Back up the working copy narrowspec (if any) under ``backupname``."""
    if repository.NARROW_REQUIREMENT not in repo.requirements:
        return
    repo.vfs.tryunlink(backupname)
    # It may not exist in old repos
    if repo.vfs.exists(DIRSTATE_FILENAME):
        util.copyfile(
            repo.vfs.join(DIRSTATE_FILENAME),
            repo.vfs.join(backupname),
            hardlink=True,
        )
211
211
212
212
def restorewcbackup(repo, backupname):
    """Restore the working copy narrowspec from its backup, if present."""
    if repository.NARROW_REQUIREMENT not in repo.requirements:
        return
    vfs = repo.vfs
    # It may not exist in old repos
    if vfs.exists(backupname):
        util.rename(vfs.join(backupname), vfs.join(DIRSTATE_FILENAME))
219
219
220
220
def clearwcbackup(repo, backupname):
    """Delete the working copy narrowspec backup; a missing file is fine."""
    if repository.NARROW_REQUIREMENT in repo.requirements:
        repo.vfs.tryunlink(backupname)
225
225
226
226
def restrictpatterns(req_includes, req_excludes, repo_includes, repo_excludes):
    r""" Restricts the patterns according to repo settings,
    results in a logical AND operation

    :param req_includes: requested includes
    :param req_excludes: requested excludes
    :param repo_includes: repo includes
    :param repo_excludes: repo excludes
    :return: include patterns, exclude patterns, and invalid include patterns.
    """
    # NOTE: the former doctest here was broken (its expected outputs did
    # not match actual behavior or Python 3 set reprs) and was removed.
    # Excludes accumulate: anything excluded by either side stays excluded.
    res_excludes = set(req_excludes)
    res_excludes.update(repo_excludes)
    invalid_includes = []
    if not req_includes:
        # Nothing requested: fall back to everything the repo includes.
        res_includes = set(repo_includes)
    elif b'path:.' not in repo_includes:
        # The repo does not include everything, so each requested include
        # must fall inside (or equal) one of the repo's includes.
        res_includes = []
        for req_include in req_includes:
            req_include = util.expandpath(util.normpath(req_include))
            if req_include in repo_includes:
                res_includes.append(req_include)
                continue
            valid = False
            for repo_include in repo_includes:
                # A request strictly inside a repo include is still valid.
                if req_include.startswith(repo_include + b'/'):
                    valid = True
                    res_includes.append(req_include)
                    break
            if not valid:
                invalid_includes.append(req_include)
        if len(res_includes) == 0:
            # No valid includes remain: exclude everything.
            res_excludes = {b'path:.'}
        else:
            res_includes = set(res_includes)
    else:
        # Repo includes everything, so the request stands as-is.
        res_includes = set(req_includes)
    return res_includes, res_excludes, invalid_includes
279
264
280
265
281 # These two are extracted for extensions (specifically for Google's CitC file
266 # These two are extracted for extensions (specifically for Google's CitC file
282 # system)
267 # system)
def _deletecleanfiles(repo, files):
    """Remove the given files from the working copy."""
    for path in files:
        repo.wvfs.unlinkpath(path)
286
271
287
272
def _writeaddedfiles(repo, pctx, files):
    """Write newly-tracked files into the working copy.

    Schedules a merge 'get' action for each file not already on disk and
    applies them against the parent of the working directory.
    """
    actions = merge.emptyactions()
    getactions = actions[merge.ACTION_GET]
    manifest = repo[b'.'].manifest()
    for f in files:
        if repo.wvfs.exists(f):
            # Already present on disk; nothing to write.
            continue
        getactions.append((f, (manifest.flags(f), False), b"narrowspec updated"))
    merge.applyupdates(
        repo,
        actions,
        wctx=repo[None],
        mctx=repo[b'.'],
        overwrite=False,
        wantfiledata=False,
    )
303
288
304
289
def checkworkingcopynarrowspec(repo):
    """Abort if the working copy's narrowspec copy is stale.

    The working copy keeps its own copy of the narrowspec; if it no
    longer matches the store's, the dirstate cannot be trusted.
    """
    # Avoid infinite recursion when updating the working copy
    if getattr(repo, '_updatingnarrowspec', False):
        return
    if repo.vfs.tryread(DIRSTATE_FILENAME) != repo.svfs.tryread(FILENAME):
        raise error.Abort(
            _(b"working copy's narrowspec is stale"),
            hint=_(b"run 'hg tracked --update-working-copy'"),
        )
316
301
317
302
def updateworkingcopy(repo, assumeclean=False):
    """updates the working copy and dirstate from the store narrowspec

    When assumeclean=True, files that are not known to be clean will also
    be deleted. It is then up to the caller to make sure they are clean.
    """
    oldspec = repo.vfs.tryread(DIRSTATE_FILENAME)
    newspec = repo.svfs.tryread(FILENAME)
    # Flag checked by checkworkingcopynarrowspec() so it does not abort
    # (or recurse) while we are rewriting the working copy ourselves.
    repo._updatingnarrowspec = True

    oldincludes, oldexcludes = parseconfig(repo.ui, oldspec)
    newincludes, newexcludes = parseconfig(repo.ui, newspec)
    oldmatch = match(repo.root, include=oldincludes, exclude=oldexcludes)
    newmatch = match(repo.root, include=newincludes, exclude=newexcludes)
    # Files newly inside, and newly outside, the narrowspec respectively.
    addedmatch = matchmod.differencematcher(newmatch, oldmatch)
    removedmatch = matchmod.differencematcher(oldmatch, newmatch)

    # Status of everything that just fell outside the narrowspec, so we
    # can decide which files are safe to delete from disk.
    ds = repo.dirstate
    lookup, status = ds.status(
        removedmatch, subrepos=[], ignored=True, clean=True, unknown=True
    )
    trackeddirty = status.modified + status.added
    clean = status.clean
    if assumeclean:
        # Caller promised there are no dirty files; treat unresolved
        # lookups as clean so they get deleted too.
        assert not trackeddirty
        clean.extend(lookup)
    else:
        # Be conservative: unresolved lookups might be dirty, so keep them.
        trackeddirty.extend(lookup)
    _deletecleanfiles(repo, clean)
    uipathfn = scmutil.getuipathfn(repo)
    # Report everything we are deliberately leaving on disk.
    for f in sorted(trackeddirty):
        repo.ui.status(
            _(b'not deleting possibly dirty file %s\n') % uipathfn(f)
        )
    for f in sorted(status.unknown):
        repo.ui.status(_(b'not deleting unknown file %s\n') % uipathfn(f))
    for f in sorted(status.ignored):
        repo.ui.status(_(b'not deleting ignored file %s\n') % uipathfn(f))
    # Stop tracking everything that moved outside the narrowspec, whether
    # or not the file itself was deleted above.
    for f in clean + trackeddirty:
        ds.drop(f)

    # Start tracking files newly inside the narrowspec and materialize
    # them in the working copy.
    pctx = repo[b'.']
    newfiles = [f for f in pctx.manifest().walk(addedmatch) if f not in ds]
    for f in newfiles:
        ds.normallookup(f)
    _writeaddedfiles(repo, pctx, newfiles)
    repo._updatingnarrowspec = False
General Comments 0
You need to be logged in to leave comments. Login now