##// END OF EJS Templates
tests: remove doctest in narrowspec, it is broken...
Kyle Lippincott -
r45053:1922694d default
parent child Browse files
Show More
@@ -1,364 +1,349 b''
1 1 # narrowspec.py - methods for working with a narrow view of a repository
2 2 #
3 3 # Copyright 2017 Google, Inc.
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 from .i18n import _
11 11 from .pycompat import getattr
12 12 from .interfaces import repository
13 13 from . import (
14 14 error,
15 15 match as matchmod,
16 16 merge,
17 17 scmutil,
18 18 sparse,
19 19 util,
20 20 )
21 21
# The file in .hg/store/ that indicates which paths exist in the store
FILENAME = b'narrowspec'
# The file in .hg/ that indicates which paths exist in the dirstate
DIRSTATE_FILENAME = b'narrowspec.dirstate'

# Pattern prefixes that are allowed in narrow patterns. This list MUST
# only contain patterns that are fast and safe to evaluate. Keep in mind
# that patterns are supplied by clients and executed on remote servers
# as part of wire protocol commands. That means that changes to this
# data structure influence the wire protocol and should not be taken
# lightly - especially removals.
VALID_PREFIXES = (
    b'path:',
    b'rootfilesin:',
)
37 37
38 38
def normalizesplitpattern(kind, pat):
    """Normalize an already-split (kind, pattern) pair.

    Strips any trailing slash from the pattern, validates it, and
    returns the (kind, pattern) tuple.
    """
    stripped = pat.rstrip(b'/')
    _validatepattern(stripped)
    return kind, stripped
47 47
48 48
49 49 def _numlines(s):
50 50 """Returns the number of lines in s, including ending empty lines."""
51 51 # We use splitlines because it is Unicode-friendly and thus Python 3
52 52 # compatible. However, it does not count empty lines at the end, so trick
53 53 # it by adding a character at the end.
54 54 return len((s + b'x').splitlines())
55 55
56 56
57 57 def _validatepattern(pat):
58 58 """Validates the pattern and aborts if it is invalid.
59 59
60 60 Patterns are stored in the narrowspec as newline-separated
61 61 POSIX-style bytestring paths. There's no escaping.
62 62 """
63 63
64 64 # We use newlines as separators in the narrowspec file, so don't allow them
65 65 # in patterns.
66 66 if _numlines(pat) > 1:
67 67 raise error.Abort(_(b'newlines are not allowed in narrowspec paths'))
68 68
69 69 components = pat.split(b'/')
70 70 if b'.' in components or b'..' in components:
71 71 raise error.Abort(
72 72 _(b'"." and ".." are not allowed in narrowspec paths')
73 73 )
74 74
75 75
def normalizepattern(pattern, defaultkind=b'path'):
    """Return the normalized form of a text-format pattern.

    If the pattern carries no kind prefix, ``defaultkind`` is applied.
    """
    rawkind, rawpat = matchmod._patsplit(pattern, defaultkind)
    normkind, normpat = normalizesplitpattern(rawkind, rawpat)
    return b'%s:%s' % (normkind, normpat)
83 83
84 84
def parsepatterns(pats):
    """Parse an iterable of patterns into a typed pattern set.

    Patterns are assumed to be ``path:`` if no prefix is present.
    For safety and performance reasons, only some prefixes are allowed;
    see ``validatepatterns()``.

    Use this on patterns coming from the user to normalize and validate
    them into the internal data structure used for representing patterns.
    """
    normalized = set()
    for orig in pats:
        normalized.add(normalizepattern(orig))
    validatepatterns(normalized)
    return normalized
99 99
100 100
def validatepatterns(pats):
    """Validate that patterns are in the expected data structure and format.

    That is: a set of normalized patterns, each beginning with ``path:``
    or ``rootfilesin:``.

    Use this on internal data structures and on patterns loaded from
    sources that use the internal, prefixed pattern representation (but
    can't necessarily be fully trusted).
    """
    if not isinstance(pats, set):
        raise error.ProgrammingError(
            b'narrow patterns should be a set; got %r' % pats
        )

    for pat in pats:
        if pat.startswith(VALID_PREFIXES):
            continue
        # Use a Mercurial exception because this can happen due to user
        # bugs (e.g. manually updating spec file).
        raise error.Abort(
            _(b'invalid prefix on narrow pattern: %s') % pat,
            hint=_(
                b'narrow patterns must begin with one of '
                b'the following: %s'
            )
            % b', '.join(VALID_PREFIXES),
        )
128 128
129 129
def format(includes, excludes):
    """Serialize include/exclude pattern sets into narrowspec file text.

    Includes that are also excluded are dropped from the [include]
    section; both sections are emitted in sorted order.
    """
    lines = [b'[include]']
    lines.extend(sorted(includes - excludes))
    lines.append(b'[exclude]')
    lines.extend(sorted(excludes))
    return b'\n'.join(lines) + b'\n'
138 138
139 139
def match(root, include=None, exclude=None):
    """Build a matcher for the given narrow include/exclude patterns.

    With no includes, nothing is narrowed in, so the explicit
    never-matcher is returned: matchmod.match() with empty include and
    exclude would instead match everything.
    """
    if include:
        return matchmod.match(
            root, b'', [], include=include or [], exclude=exclude or []
        )
    return matchmod.never()
149 149
150 150
def parseconfig(ui, spec):
    """Parse narrowspec file text into validated (includes, excludes) sets.

    Aborts if the spec uses '%include' profiles, which are not supported
    in narrowspec.
    """
    # maybe we should care about the profiles returned too
    includes, excludes, profiles = sparse.parseconfig(ui, spec, b'narrow')
    if profiles:
        raise error.Abort(
            _(
                b"including other spec files using '%include' is not"
                b" supported in narrowspec"
            )
        )

    validatepatterns(includes)
    validatepatterns(excludes)

    return includes, excludes
166 166
167 167
def load(repo):
    """Read and parse the narrowspec from the store.

    A missing narrowspec file is treated the same as an existing empty
    one.
    """
    return parseconfig(repo.ui, repo.svfs.tryread(FILENAME))
173 173
174 174
def save(repo, includepats, excludepats):
    """Validate the pattern sets and write them to the store narrowspec."""
    validatepatterns(includepats)
    validatepatterns(excludepats)
    repo.svfs.write(FILENAME, format(includepats, excludepats))
180 180
181 181
def copytoworkingcopy(repo):
    """Mirror the store narrowspec into the working-copy file in .hg/."""
    repo.vfs.write(DIRSTATE_FILENAME, repo.svfs.read(FILENAME))
185 185
186 186
def savebackup(repo, backupname):
    """Hard-link a backup of the store narrowspec (narrow repos only)."""
    if repository.NARROW_REQUIREMENT not in repo.requirements:
        return
    storevfs = repo.svfs
    storevfs.tryunlink(backupname)
    util.copyfile(
        storevfs.join(FILENAME), storevfs.join(backupname), hardlink=True
    )
193 193
194 194
def restorebackup(repo, backupname):
    """Move a store narrowspec backup back into place (narrow repos only)."""
    if repository.NARROW_REQUIREMENT not in repo.requirements:
        return
    storevfs = repo.svfs
    util.rename(storevfs.join(backupname), storevfs.join(FILENAME))
199 199
200 200
def savewcbackup(repo, backupname):
    """Hard-link a backup of the working-copy narrowspec (narrow repos only).

    Old repos may have no working-copy narrowspec at all; in that case
    no backup is made.
    """
    if repository.NARROW_REQUIREMENT not in repo.requirements:
        return
    wcvfs = repo.vfs
    wcvfs.tryunlink(backupname)
    # It may not exist in old repos
    if not wcvfs.exists(DIRSTATE_FILENAME):
        return
    util.copyfile(
        wcvfs.join(DIRSTATE_FILENAME), wcvfs.join(backupname), hardlink=True
    )
211 211
212 212
def restorewcbackup(repo, backupname):
    """Move a working-copy narrowspec backup back into place."""
    if repository.NARROW_REQUIREMENT not in repo.requirements:
        return
    # It may not exist in old repos
    if not repo.vfs.exists(backupname):
        return
    util.rename(repo.vfs.join(backupname), repo.vfs.join(DIRSTATE_FILENAME))
219 219
220 220
def clearwcbackup(repo, backupname):
    """Delete a working-copy narrowspec backup, if any (narrow repos only)."""
    if repository.NARROW_REQUIREMENT in repo.requirements:
        repo.vfs.tryunlink(backupname)
225 225
226 226
def restrictpatterns(req_includes, req_excludes, repo_includes, repo_excludes):
    r""" Restricts the patterns according to repo settings,
    results in a logical AND operation

    :param req_includes: requested includes
    :param req_excludes: requested excludes
    :param repo_includes: repo includes
    :param repo_excludes: repo excludes
    :return: include patterns, exclude patterns, and invalid include patterns.
    """
    res_excludes = set(req_excludes) | set(repo_excludes)
    invalid_includes = []

    if not req_includes:
        # Nothing specifically requested: keep everything the repo tracks.
        res_includes = set(repo_includes)
    elif b'path:.' in repo_includes:
        # The repo tracks everything, so any request is valid as-is.
        res_includes = set(req_includes)
    else:
        # Keep only requests that fall inside what the repo tracks.
        kept = []
        for req_include in req_includes:
            req_include = util.expandpath(util.normpath(req_include))
            if req_include in repo_includes:
                kept.append(req_include)
                continue
            for repo_include in repo_includes:
                if req_include.startswith(repo_include + b'/'):
                    kept.append(req_include)
                    break
            else:
                invalid_includes.append(req_include)
        if kept:
            res_includes = set(kept)
        else:
            # No overlap at all: include nothing, exclude everything.
            # NOTE: an empty *list* (not set) of includes is returned in
            # this branch, matching the historical behavior.
            res_includes = []
            res_excludes = {b'path:.'}

    return res_includes, res_excludes, invalid_includes
279 264
280 265
# These two are extracted for extensions (specifically for Google's CitC file
# system)
def _deletecleanfiles(repo, files):
    """Remove the given (known-clean) files from the working copy."""
    for path in files:
        repo.wvfs.unlinkpath(path)
286 271
287 272
def _writeaddedfiles(repo, pctx, files):
    """Materialize newly-tracked files in the working copy.

    Files already present on disk are left alone; the rest are fetched
    from the dirstate parent's manifest via the merge machinery.
    """
    actions = merge.emptyactions()
    addgaction = actions[merge.ACTION_GET].append
    mf = repo[b'.'].manifest()
    missing = (f for f in files if not repo.wvfs.exists(f))
    for f in missing:
        addgaction((f, (mf.flags(f), False), b"narrowspec updated"))
    merge.applyupdates(
        repo,
        actions,
        wctx=repo[None],
        mctx=repo[b'.'],
        overwrite=False,
        wantfiledata=False,
    )
303 288
304 289
def checkworkingcopynarrowspec(repo):
    """Abort if the working copy's narrowspec copy is out of sync with the
    store's narrowspec."""
    # Avoid infinite recursion when updating the working copy
    if getattr(repo, '_updatingnarrowspec', False):
        return
    storespec = repo.svfs.tryread(FILENAME)
    wcspec = repo.vfs.tryread(DIRSTATE_FILENAME)
    if wcspec == storespec:
        return
    raise error.Abort(
        _(b"working copy's narrowspec is stale"),
        hint=_(b"run 'hg tracked --update-working-copy'"),
    )
316 301
317 302
def updateworkingcopy(repo, assumeclean=False):
    """updates the working copy and dirstate from the store narrowspec

    When assumeclean=True, files that are not known to be clean will also
    be deleted. It is then up to the caller to make sure they are clean.
    """
    # The dirstate copy is the spec the working copy was last synced to;
    # the store copy is the new target.
    oldspec = repo.vfs.tryread(DIRSTATE_FILENAME)
    newspec = repo.svfs.tryread(FILENAME)
    # Guard read by checkworkingcopynarrowspec() to avoid recursing back
    # into this function while we update.
    repo._updatingnarrowspec = True

    oldincludes, oldexcludes = parseconfig(repo.ui, oldspec)
    newincludes, newexcludes = parseconfig(repo.ui, newspec)
    oldmatch = match(repo.root, include=oldincludes, exclude=oldexcludes)
    newmatch = match(repo.root, include=newincludes, exclude=newexcludes)
    # Files newly inside the narrowspec, and files that fell out of it.
    addedmatch = matchmod.differencematcher(newmatch, oldmatch)
    removedmatch = matchmod.differencematcher(oldmatch, newmatch)

    ds = repo.dirstate
    lookup, status = ds.status(
        removedmatch, subrepos=[], ignored=True, clean=True, unknown=True
    )
    trackeddirty = status.modified + status.added
    clean = status.clean
    if assumeclean:
        # Caller guarantees the lookup files are clean; delete them too.
        assert not trackeddirty
        clean.extend(lookup)
    else:
        trackeddirty.extend(lookup)
    _deletecleanfiles(repo, clean)
    uipathfn = scmutil.getuipathfn(repo)
    # Files that might hold user data (dirty, unknown, ignored) are kept
    # on disk; tell the user about each one we refuse to delete.
    for f in sorted(trackeddirty):
        repo.ui.status(
            _(b'not deleting possibly dirty file %s\n') % uipathfn(f)
        )
    for f in sorted(status.unknown):
        repo.ui.status(_(b'not deleting unknown file %s\n') % uipathfn(f))
    for f in sorted(status.ignored):
        repo.ui.status(_(b'not deleting ignored file %s\n') % uipathfn(f))
    # Whether deleted or kept, files outside the new spec stop being
    # tracked in the dirstate.
    for f in clean + trackeddirty:
        ds.drop(f)

    pctx = repo[b'.']
    # Files newly in scope that the dirstate doesn't know about yet.
    newfiles = [f for f in pctx.manifest().walk(addedmatch) if f not in ds]
    for f in newfiles:
        ds.normallookup(f)
    _writeaddedfiles(repo, pctx, newfiles)
    repo._updatingnarrowspec = False
General Comments 0
You need to be logged in to leave comments. Login now