narrowspec: replace one recursion-avoidance hack with another...
Martin von Zweigbergk
r42603:4738c292 default
@@ -1,314 +1,317 @@
 # narrowspec.py - methods for working with a narrow view of a repository
 #
 # Copyright 2017 Google, Inc.
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 from __future__ import absolute_import

 from .i18n import _
 from . import (
     error,
     match as matchmod,
     merge,
     repository,
     scmutil,
     sparse,
     util,
 )

 # The file in .hg/store/ that indicates which paths exit in the store
 FILENAME = 'narrowspec'
 # The file in .hg/ that indicates which paths exit in the dirstate
 DIRSTATE_FILENAME = 'narrowspec.dirstate'

 # Pattern prefixes that are allowed in narrow patterns. This list MUST
 # only contain patterns that are fast and safe to evaluate. Keep in mind
 # that patterns are supplied by clients and executed on remote servers
 # as part of wire protocol commands. That means that changes to this
 # data structure influence the wire protocol and should not be taken
 # lightly - especially removals.
 VALID_PREFIXES = (
     b'path:',
     b'rootfilesin:',
 )

 def normalizesplitpattern(kind, pat):
     """Returns the normalized version of a pattern and kind.

     Returns a tuple with the normalized kind and normalized pattern.
     """
     pat = pat.rstrip('/')
     _validatepattern(pat)
     return kind, pat

 def _numlines(s):
     """Returns the number of lines in s, including ending empty lines."""
     # We use splitlines because it is Unicode-friendly and thus Python 3
     # compatible. However, it does not count empty lines at the end, so trick
     # it by adding a character at the end.
     return len((s + 'x').splitlines())

 def _validatepattern(pat):
     """Validates the pattern and aborts if it is invalid.

     Patterns are stored in the narrowspec as newline-separated
     POSIX-style bytestring paths. There's no escaping.
     """

     # We use newlines as separators in the narrowspec file, so don't allow them
     # in patterns.
     if _numlines(pat) > 1:
         raise error.Abort(_('newlines are not allowed in narrowspec paths'))

     components = pat.split('/')
     if '.' in components or '..' in components:
         raise error.Abort(_('"." and ".." are not allowed in narrowspec paths'))

 def normalizepattern(pattern, defaultkind='path'):
     """Returns the normalized version of a text-format pattern.

     If the pattern has no kind, the default will be added.
     """
     kind, pat = matchmod._patsplit(pattern, defaultkind)
     return '%s:%s' % normalizesplitpattern(kind, pat)

 def parsepatterns(pats):
     """Parses an iterable of patterns into a typed pattern set.

     Patterns are assumed to be ``path:`` if no prefix is present.
     For safety and performance reasons, only some prefixes are allowed.
     See ``validatepatterns()``.

     This function should be used on patterns that come from the user to
     normalize and validate them to the internal data structure used for
     representing patterns.
     """
     res = {normalizepattern(orig) for orig in pats}
     validatepatterns(res)
     return res

 def validatepatterns(pats):
     """Validate that patterns are in the expected data structure and format.

     And that is a set of normalized patterns beginning with ``path:`` or
     ``rootfilesin:``.

     This function should be used to validate internal data structures
     and patterns that are loaded from sources that use the internal,
     prefixed pattern representation (but can't necessarily be fully trusted).
     """
     if not isinstance(pats, set):
         raise error.ProgrammingError('narrow patterns should be a set; '
                                      'got %r' % pats)

     for pat in pats:
         if not pat.startswith(VALID_PREFIXES):
             # Use a Mercurial exception because this can happen due to user
             # bugs (e.g. manually updating spec file).
             raise error.Abort(_('invalid prefix on narrow pattern: %s') % pat,
                               hint=_('narrow patterns must begin with one of '
                                      'the following: %s') %
                                    ', '.join(VALID_PREFIXES))

 def format(includes, excludes):
     output = '[include]\n'
     for i in sorted(includes - excludes):
         output += i + '\n'
     output += '[exclude]\n'
     for e in sorted(excludes):
         output += e + '\n'
     return output

 def match(root, include=None, exclude=None):
     if not include:
         # Passing empty include and empty exclude to matchmod.match()
         # gives a matcher that matches everything, so explicitly use
         # the nevermatcher.
         return matchmod.never()
     return matchmod.match(root, '', [], include=include or [],
                           exclude=exclude or [])

 def parseconfig(ui, spec):
     # maybe we should care about the profiles returned too
     includepats, excludepats, profiles = sparse.parseconfig(ui, spec, 'narrow')
     if profiles:
         raise error.Abort(_("including other spec files using '%include' is not"
                             " supported in narrowspec"))

     validatepatterns(includepats)
     validatepatterns(excludepats)

     return includepats, excludepats

 def load(repo):
     # Treat "narrowspec does not exist" the same as "narrowspec file exists
     # and is empty".
     spec = repo.svfs.tryread(FILENAME)
     return parseconfig(repo.ui, spec)

 def save(repo, includepats, excludepats):
     validatepatterns(includepats)
     validatepatterns(excludepats)
     spec = format(includepats, excludepats)
     repo.svfs.write(FILENAME, spec)

 def copytoworkingcopy(repo):
     spec = repo.svfs.read(FILENAME)
     repo.vfs.write(DIRSTATE_FILENAME, spec)

 def savebackup(repo, backupname):
     if repository.NARROW_REQUIREMENT not in repo.requirements:
         return
     svfs = repo.svfs
     svfs.tryunlink(backupname)
     util.copyfile(svfs.join(FILENAME), svfs.join(backupname), hardlink=True)

 def restorebackup(repo, backupname):
     if repository.NARROW_REQUIREMENT not in repo.requirements:
         return
     util.rename(repo.svfs.join(backupname), repo.svfs.join(FILENAME))

 def savewcbackup(repo, backupname):
     if repository.NARROW_REQUIREMENT not in repo.requirements:
         return
     vfs = repo.vfs
     vfs.tryunlink(backupname)
     # It may not exist in old repos
     if vfs.exists(DIRSTATE_FILENAME):
         util.copyfile(vfs.join(DIRSTATE_FILENAME), vfs.join(backupname),
                       hardlink=True)

 def restorewcbackup(repo, backupname):
     if repository.NARROW_REQUIREMENT not in repo.requirements:
         return
     # It may not exist in old repos
     if repo.vfs.exists(backupname):
         util.rename(repo.vfs.join(backupname), repo.vfs.join(DIRSTATE_FILENAME))

 def clearwcbackup(repo, backupname):
     if repository.NARROW_REQUIREMENT not in repo.requirements:
         return
     repo.vfs.tryunlink(backupname)

 def restrictpatterns(req_includes, req_excludes, repo_includes, repo_excludes):
     r""" Restricts the patterns according to repo settings,
     results in a logical AND operation

     :param req_includes: requested includes
     :param req_excludes: requested excludes
     :param repo_includes: repo includes
     :param repo_excludes: repo excludes
     :return: include patterns, exclude patterns, and invalid include patterns.

     >>> restrictpatterns({'f1','f2'}, {}, ['f1'], [])
     (set(['f1']), {}, [])
     >>> restrictpatterns({'f1'}, {}, ['f1','f2'], [])
     (set(['f1']), {}, [])
     >>> restrictpatterns({'f1/fc1', 'f3/fc3'}, {}, ['f1','f2'], [])
     (set(['f1/fc1']), {}, [])
     >>> restrictpatterns({'f1_fc1'}, {}, ['f1','f2'], [])
     ([], set(['path:.']), [])
     >>> restrictpatterns({'f1/../f2/fc2'}, {}, ['f1','f2'], [])
     (set(['f2/fc2']), {}, [])
     >>> restrictpatterns({'f1/../f3/fc3'}, {}, ['f1','f2'], [])
     ([], set(['path:.']), [])
     >>> restrictpatterns({'f1/$non_exitent_var'}, {}, ['f1','f2'], [])
     (set(['f1/$non_exitent_var']), {}, [])
     """
     res_excludes = set(req_excludes)
     res_excludes.update(repo_excludes)
     invalid_includes = []
     if not req_includes:
         res_includes = set(repo_includes)
     elif 'path:.' not in repo_includes:
         res_includes = []
         for req_include in req_includes:
             req_include = util.expandpath(util.normpath(req_include))
             if req_include in repo_includes:
                 res_includes.append(req_include)
                 continue
             valid = False
             for repo_include in repo_includes:
                 if req_include.startswith(repo_include + '/'):
                     valid = True
                     res_includes.append(req_include)
                     break
             if not valid:
                 invalid_includes.append(req_include)
         if len(res_includes) == 0:
             res_excludes = {'path:.'}
         else:
             res_includes = set(res_includes)
     else:
         res_includes = set(req_includes)
     return res_includes, res_excludes, invalid_includes

 # These two are extracted for extensions (specifically for Google's CitC file
 # system)
 def _deletecleanfiles(repo, files):
     for f in files:
         repo.wvfs.unlinkpath(f)

 def _writeaddedfiles(repo, pctx, files):
     actions = merge.emptyactions()
     addgaction = actions[merge.ACTION_GET].append
     mf = repo['.'].manifest()
     for f in files:
         if not repo.wvfs.exists(f):
             addgaction((f, (mf.flags(f), False), "narrowspec updated"))
     merge.applyupdates(repo, actions, wctx=repo[None],
                        mctx=repo['.'], overwrite=False)

 def checkworkingcopynarrowspec(repo):
+    # Avoid infinite recursion when updating the working copy
+    if getattr(repo, '_updatingnarrowspec', False):
+        return
     storespec = repo.svfs.tryread(FILENAME)
     wcspec = repo.vfs.tryread(DIRSTATE_FILENAME)
     if wcspec != storespec:
         raise error.Abort(_("working copy's narrowspec is stale"),
                           hint=_("run 'hg tracked --update-working-copy'"))

 def updateworkingcopy(repo, assumeclean=False):
     """updates the working copy and dirstate from the store narrowspec

     When assumeclean=True, files that are not known to be clean will also
     be deleted. It is then up to the caller to make sure they are clean.
     """
     oldspec = repo.vfs.tryread(DIRSTATE_FILENAME)
     newspec = repo.svfs.tryread(FILENAME)
+    repo._updatingnarrowspec = True

     oldincludes, oldexcludes = parseconfig(repo.ui, oldspec)
     newincludes, newexcludes = parseconfig(repo.ui, newspec)
     oldmatch = match(repo.root, include=oldincludes, exclude=oldexcludes)
     newmatch = match(repo.root, include=newincludes, exclude=newexcludes)
     addedmatch = matchmod.differencematcher(newmatch, oldmatch)
     removedmatch = matchmod.differencematcher(oldmatch, newmatch)

     ds = repo.dirstate
     lookup, status = ds.status(removedmatch, subrepos=[], ignored=True,
                                clean=True, unknown=True)
     trackeddirty = status.modified + status.added
     clean = status.clean
     if assumeclean:
         assert not trackeddirty
         clean.extend(lookup)
     else:
         trackeddirty.extend(lookup)
     _deletecleanfiles(repo, clean)
     uipathfn = scmutil.getuipathfn(repo)
     for f in sorted(trackeddirty):
         repo.ui.status(_('not deleting possibly dirty file %s\n') % uipathfn(f))
     for f in sorted(status.unknown):
         repo.ui.status(_('not deleting unknown file %s\n') % uipathfn(f))
     for f in sorted(status.ignored):
         repo.ui.status(_('not deleting ignored file %s\n') % uipathfn(f))
     for f in clean + trackeddirty:
         ds.drop(f)

-    repo.narrowpats = newincludes, newexcludes
-    repo._narrowmatch = newmatch
     pctx = repo['.']
     newfiles = [f for f in pctx.manifest().walk(addedmatch) if f not in ds]
     for f in newfiles:
         ds.normallookup(f)
     _writeaddedfiles(repo, pctx, newfiles)
+    repo._updatingnarrowspec = False
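
As the hunk shows, the previous recursion-avoidance hack assigned repo.narrowpats and repo._narrowmatch directly inside updateworkingcopy(); this revision replaces it with a transient repo._updatingnarrowspec flag that checkworkingcopynarrowspec() consults before comparing the store and working-copy specs, so the check becomes a no-op while the update itself is running. The snippet below is only a minimal sketch of that guard pattern in isolation; the FakeRepo class and its attributes are invented for illustration and are not Mercurial APIs.

    # Toy illustration of the flag-based recursion guard (not Mercurial code).
    class FakeRepo(object):
        def __init__(self, storespec, wcspec):
            self.storespec = storespec  # spec as stored under .hg/store/
            self.wcspec = wcspec        # copy kept next to the dirstate

        def checkworkingcopynarrowspec(self):
            # Skip the staleness check while the update is in flight;
            # otherwise touching the dirstate mid-update would call back
            # into this check and recurse (or spuriously abort).
            if getattr(self, '_updatingnarrowspec', False):
                return
            if self.wcspec != self.storespec:
                raise RuntimeError("working copy's narrowspec is stale")

        def updateworkingcopy(self):
            self._updatingnarrowspec = True
            # ... add/remove files and poke the dirstate here; any nested
            # call to checkworkingcopynarrowspec() is now a no-op ...
            self.checkworkingcopynarrowspec()
            self.wcspec = self.storespec
            self._updatingnarrowspec = False

    repo = FakeRepo(storespec=b'[include]\npath:foo\n[exclude]\n', wcspec=b'')
    repo.updateworkingcopy()           # no abort, no recursion
    repo.checkworkingcopynarrowspec()  # specs now match, so this passes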