##// END OF EJS Templates
narrowspec: replace one recursion-avoidance hack with another...
Martin von Zweigbergk -
r42603:4738c292 default
parent child Browse files
Show More
@@ -1,314 +1,317 b''
1 1 # narrowspec.py - methods for working with a narrow view of a repository
2 2 #
3 3 # Copyright 2017 Google, Inc.
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 from .i18n import _
11 11 from . import (
12 12 error,
13 13 match as matchmod,
14 14 merge,
15 15 repository,
16 16 scmutil,
17 17 sparse,
18 18 util,
19 19 )
20 20
# The file in .hg/store/ that indicates which paths exist in the store
FILENAME = 'narrowspec'
# The file in .hg/ that indicates which paths exist in the dirstate
DIRSTATE_FILENAME = 'narrowspec.dirstate'

# Pattern prefixes that are allowed in narrow patterns. This list MUST
# only contain patterns that are fast and safe to evaluate. Keep in mind
# that patterns are supplied by clients and executed on remote servers
# as part of wire protocol commands. That means that changes to this
# data structure influence the wire protocol and should not be taken
# lightly - especially removals.
VALID_PREFIXES = (
    b'path:',
    b'rootfilesin:',
)
36 36
def normalizesplitpattern(kind, pat):
    """Normalize a (kind, pattern) pair.

    Strips trailing slashes from the pattern, validates it, and returns
    the resulting (kind, pattern) tuple.
    """
    stripped = pat.rstrip('/')
    _validatepattern(stripped)
    return kind, stripped
45 45
46 46 def _numlines(s):
47 47 """Returns the number of lines in s, including ending empty lines."""
48 48 # We use splitlines because it is Unicode-friendly and thus Python 3
49 49 # compatible. However, it does not count empty lines at the end, so trick
50 50 # it by adding a character at the end.
51 51 return len((s + 'x').splitlines())
52 52
def _validatepattern(pat):
    """Abort if the given pattern is not valid for a narrowspec.

    Patterns are stored in the narrowspec as newline-separated
    POSIX-style bytestring paths. There's no escaping.
    """

    # Newlines separate entries in the narrowspec file, so a pattern
    # containing one would corrupt the file.
    if _numlines(pat) > 1:
        raise error.Abort(_('newlines are not allowed in narrowspec paths'))

    for component in pat.split('/'):
        if component in ('.', '..'):
            raise error.Abort(
                _('"." and ".." are not allowed in narrowspec paths'))
68 68
def normalizepattern(pattern, defaultkind='path'):
    """Return the normalized text form of a pattern.

    If the pattern carries no kind prefix, defaultkind is applied.
    """
    kind, pat = matchmod._patsplit(pattern, defaultkind)
    normalized = normalizesplitpattern(kind, pat)
    return '%s:%s' % normalized
76 76
def parsepatterns(pats):
    """Parse an iterable of patterns into a typed pattern set.

    Patterns are assumed to be ``path:`` if no prefix is present.
    For safety and performance reasons, only some prefixes are allowed.
    See ``validatepatterns()``.

    This function should be used on patterns that come from the user to
    normalize and validate them to the internal data structure used for
    representing patterns.
    """
    normalized = set()
    for orig in pats:
        normalized.add(normalizepattern(orig))
    validatepatterns(normalized)
    return normalized
91 91
def validatepatterns(pats):
    """Validate that patterns are in the expected data structure and format.

    And that is a set of normalized patterns beginning with ``path:`` or
    ``rootfilesin:``.

    This function should be used to validate internal data structures
    and patterns that are loaded from sources that use the internal,
    prefixed pattern representation (but can't necessarily be fully trusted).
    """
    if not isinstance(pats, set):
        raise error.ProgrammingError('narrow patterns should be a set; '
                                     'got %r' % pats)

    for pat in pats:
        if pat.startswith(VALID_PREFIXES):
            continue
        # Use a Mercurial exception because this can happen due to user
        # bugs (e.g. manually updating spec file).
        raise error.Abort(_('invalid prefix on narrow pattern: %s') % pat,
                          hint=_('narrow patterns must begin with one of '
                                 'the following: %s') %
                               ', '.join(VALID_PREFIXES))
114 114
def format(includes, excludes):
    """Render include/exclude pattern sets as narrowspec file text."""
    lines = ['[include]']
    lines.extend(sorted(includes - excludes))
    lines.append('[exclude]')
    lines.extend(sorted(excludes))
    return '\n'.join(lines) + '\n'
123 123
def match(root, include=None, exclude=None):
    """Build a matcher for the given narrow include/exclude patterns."""
    if include:
        return matchmod.match(root, '', [], include=include or [],
                              exclude=exclude or [])
    # Passing empty include and empty exclude to matchmod.match()
    # gives a matcher that matches everything, so explicitly use
    # the nevermatcher.
    return matchmod.never()
132 132
def parseconfig(ui, spec):
    """Parse narrowspec text into validated (includes, excludes) sets."""
    # maybe we should care about the profiles returned too
    includepats, excludepats, profiles = sparse.parseconfig(ui, spec, 'narrow')
    if profiles:
        raise error.Abort(_("including other spec files using '%include' is not"
                            " supported in narrowspec"))

    for pats in (includepats, excludepats):
        validatepatterns(pats)

    return includepats, excludepats
144 144
def load(repo):
    """Read and parse the narrowspec from the store.

    A missing narrowspec file is treated the same as an existing but
    empty one.
    """
    return parseconfig(repo.ui, repo.svfs.tryread(FILENAME))
150 150
def save(repo, includepats, excludepats):
    """Validate the pattern sets and write them to the store narrowspec."""
    for pats in (includepats, excludepats):
        validatepatterns(pats)
    repo.svfs.write(FILENAME, format(includepats, excludepats))
156 156
def copytoworkingcopy(repo):
    """Mirror the store narrowspec into the working-copy copy in .hg/."""
    repo.vfs.write(DIRSTATE_FILENAME, repo.svfs.read(FILENAME))
160 160
def savebackup(repo, backupname):
    """Hard-link a backup of the store narrowspec, replacing any old one.

    No-op for repositories without the narrow requirement.
    """
    if repository.NARROW_REQUIREMENT in repo.requirements:
        repo.svfs.tryunlink(backupname)
        util.copyfile(repo.svfs.join(FILENAME), repo.svfs.join(backupname),
                      hardlink=True)
167 167
def restorebackup(repo, backupname):
    """Move a previously saved store narrowspec backup back into place.

    No-op for repositories without the narrow requirement.
    """
    if repository.NARROW_REQUIREMENT in repo.requirements:
        svfs = repo.svfs
        util.rename(svfs.join(backupname), svfs.join(FILENAME))
172 172
def savewcbackup(repo, backupname):
    """Hard-link a backup of the working-copy narrowspec.

    No-op for repositories without the narrow requirement.
    """
    if repository.NARROW_REQUIREMENT not in repo.requirements:
        return
    repo.vfs.tryunlink(backupname)
    # It may not exist in old repos
    if repo.vfs.exists(DIRSTATE_FILENAME):
        util.copyfile(repo.vfs.join(DIRSTATE_FILENAME),
                      repo.vfs.join(backupname), hardlink=True)
182 182
def restorewcbackup(repo, backupname):
    """Move a working-copy narrowspec backup back into place.

    No-op without the narrow requirement or when no backup exists.
    """
    if repository.NARROW_REQUIREMENT not in repo.requirements:
        return
    vfs = repo.vfs
    # It may not exist in old repos
    if vfs.exists(backupname):
        util.rename(vfs.join(backupname), vfs.join(DIRSTATE_FILENAME))
189 189
def clearwcbackup(repo, backupname):
    """Delete a working-copy narrowspec backup, if any.

    No-op for repositories without the narrow requirement.
    """
    if repository.NARROW_REQUIREMENT in repo.requirements:
        repo.vfs.tryunlink(backupname)
194 194
def restrictpatterns(req_includes, req_excludes, repo_includes, repo_excludes):
    r""" Restricts the patterns according to repo settings,
    results in a logical AND operation

    :param req_includes: requested includes
    :param req_excludes: requested excludes
    :param repo_includes: repo includes
    :param repo_excludes: repo excludes
    :return: include patterns, exclude patterns, and invalid include patterns.

    >>> restrictpatterns({'f1','f2'}, {}, ['f1'], [])
    (set(['f1']), {}, [])
    >>> restrictpatterns({'f1'}, {}, ['f1','f2'], [])
    (set(['f1']), {}, [])
    >>> restrictpatterns({'f1/fc1', 'f3/fc3'}, {}, ['f1','f2'], [])
    (set(['f1/fc1']), {}, [])
    >>> restrictpatterns({'f1_fc1'}, {}, ['f1','f2'], [])
    ([], set(['path:.']), [])
    >>> restrictpatterns({'f1/../f2/fc2'}, {}, ['f1','f2'], [])
    (set(['f2/fc2']), {}, [])
    >>> restrictpatterns({'f1/../f3/fc3'}, {}, ['f1','f2'], [])
    ([], set(['path:.']), [])
    >>> restrictpatterns({'f1/$non_exitent_var'}, {}, ['f1','f2'], [])
    (set(['f1/$non_exitent_var']), {}, [])
    """
    # Excludes always accumulate: requested plus repo-level.
    res_excludes = set(req_excludes) | set(repo_excludes)
    invalid_includes = []
    if not req_includes:
        # Nothing specifically requested: fall back to the repo's includes.
        res_includes = set(repo_includes)
    elif 'path:.' in repo_includes:
        # The repo includes everything, so any requested include is fine.
        res_includes = set(req_includes)
    else:
        # Keep only requested includes that fall inside the repo's includes.
        res_includes = []
        for req_include in req_includes:
            req_include = util.expandpath(util.normpath(req_include))
            if req_include in repo_includes:
                res_includes.append(req_include)
                continue
            for repo_include in repo_includes:
                if req_include.startswith(repo_include + '/'):
                    res_includes.append(req_include)
                    break
            else:
                invalid_includes.append(req_include)
        if res_includes:
            res_includes = set(res_includes)
        else:
            # Nothing matched: exclude everything. Note that res_includes
            # deliberately stays an empty list here (see doctest above).
            res_excludes = {'path:.'}
    return res_includes, res_excludes, invalid_includes
247 247
248 248 # These two are extracted for extensions (specifically for Google's CitC file
249 249 # system)
250 250 def _deletecleanfiles(repo, files):
251 251 for f in files:
252 252 repo.wvfs.unlinkpath(f)
253 253
def _writeaddedfiles(repo, pctx, files):
    """Write out newly-included files missing from the working copy.

    Files already present on disk are left untouched; the rest are
    materialized via merge.applyupdates() as ACTION_GET actions.
    """
    actions = merge.emptyactions()
    getactions = actions[merge.ACTION_GET]
    mf = repo['.'].manifest()
    for f in files:
        if repo.wvfs.exists(f):
            continue
        getactions.append((f, (mf.flags(f), False), "narrowspec updated"))
    merge.applyupdates(repo, actions, wctx=repo[None],
                       mctx=repo['.'], overwrite=False)
263 263
def checkworkingcopynarrowspec(repo):
    """Abort if the working copy's narrowspec is out of sync with the store."""
    # Avoid infinite recursion when updating the working copy
    if getattr(repo, '_updatingnarrowspec', False):
        return
    if repo.vfs.tryread(DIRSTATE_FILENAME) != repo.svfs.tryread(FILENAME):
        raise error.Abort(_("working copy's narrowspec is stale"),
                          hint=_("run 'hg tracked --update-working-copy'"))
270 273
def updateworkingcopy(repo, assumeclean=False):
    """updates the working copy and dirstate from the store narrowspec

    When assumeclean=True, files that are not known to be clean will also
    be deleted. It is then up to the caller to make sure they are clean.
    """
    oldspec = repo.vfs.tryread(DIRSTATE_FILENAME)
    newspec = repo.svfs.tryread(FILENAME)
    # Flag read by checkworkingcopynarrowspec() to avoid infinite recursion
    # while we update the working copy. Reset in a finally block so that an
    # exception here cannot leave the staleness check disabled for the rest
    # of this repo object's lifetime.
    repo._updatingnarrowspec = True

    try:
        oldincludes, oldexcludes = parseconfig(repo.ui, oldspec)
        newincludes, newexcludes = parseconfig(repo.ui, newspec)
        oldmatch = match(repo.root, include=oldincludes, exclude=oldexcludes)
        newmatch = match(repo.root, include=newincludes, exclude=newexcludes)
        addedmatch = matchmod.differencematcher(newmatch, oldmatch)
        removedmatch = matchmod.differencematcher(oldmatch, newmatch)

        # Find the state of files that fall out of the narrowspec so we can
        # delete the clean ones and warn about the rest.
        ds = repo.dirstate
        lookup, status = ds.status(removedmatch, subrepos=[], ignored=True,
                                   clean=True, unknown=True)
        trackeddirty = status.modified + status.added
        clean = status.clean
        if assumeclean:
            assert not trackeddirty
            clean.extend(lookup)
        else:
            trackeddirty.extend(lookup)
        _deletecleanfiles(repo, clean)
        uipathfn = scmutil.getuipathfn(repo)
        for f in sorted(trackeddirty):
            repo.ui.status(_('not deleting possibly dirty file %s\n')
                           % uipathfn(f))
        for f in sorted(status.unknown):
            repo.ui.status(_('not deleting unknown file %s\n') % uipathfn(f))
        for f in sorted(status.ignored):
            repo.ui.status(_('not deleting ignored file %s\n') % uipathfn(f))
        for f in clean + trackeddirty:
            ds.drop(f)

        # Materialize files newly covered by the narrowspec.
        pctx = repo['.']
        newfiles = [f for f in pctx.manifest().walk(addedmatch) if f not in ds]
        for f in newfiles:
            ds.normallookup(f)
        _writeaddedfiles(repo, pctx, newfiles)
    finally:
        repo._updatingnarrowspec = False
General Comments 0
You need to be logged in to leave comments. Login now