##// END OF EJS Templates
narrow: use `running_status` in `updateworkingcopy`...
marmoute -
r51030:31be0b46 default
parent child Browse files
Show More
@@ -1,356 +1,362 b''
1 1 # narrowspec.py - methods for working with a narrow view of a repository
2 2 #
3 3 # Copyright 2017 Google, Inc.
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8
9 9 from .i18n import _
10 10 from .pycompat import getattr
11 11 from . import (
12 12 error,
13 13 match as matchmod,
14 14 merge,
15 15 mergestate as mergestatemod,
16 16 requirements,
17 17 scmutil,
18 18 sparse,
19 19 util,
20 20 )
21 21
# The file in .hg/store/ that indicates which paths exist in the store
23 23 FILENAME = b'narrowspec'
# The file in .hg/ that indicates which paths exist in the dirstate
25 25 DIRSTATE_FILENAME = b'narrowspec.dirstate'
26 26
27 27 # Pattern prefixes that are allowed in narrow patterns. This list MUST
28 28 # only contain patterns that are fast and safe to evaluate. Keep in mind
29 29 # that patterns are supplied by clients and executed on remote servers
30 30 # as part of wire protocol commands. That means that changes to this
31 31 # data structure influence the wire protocol and should not be taken
32 32 # lightly - especially removals.
33 33 VALID_PREFIXES = (
34 34 b'path:',
35 35 b'rootfilesin:',
36 36 )
37 37
38 38
def normalizesplitpattern(kind, pat):
    """Return the normalized (kind, pattern) pair for a split pattern.

    Trailing slashes are stripped from the pattern, which is then
    validated before being returned alongside its kind.
    """
    stripped = pat.rstrip(b'/')
    _validatepattern(stripped)
    return kind, stripped
47 47
48 48
49 49 def _numlines(s):
50 50 """Returns the number of lines in s, including ending empty lines."""
51 51 # We use splitlines because it is Unicode-friendly and thus Python 3
52 52 # compatible. However, it does not count empty lines at the end, so trick
53 53 # it by adding a character at the end.
54 54 return len((s + b'x').splitlines())
55 55
56 56
def _validatepattern(pat):
    """Abort if ``pat`` is not a valid narrowspec pattern.

    Patterns are stored in the narrowspec as newline-separated
    POSIX-style bytestring paths with no escaping, so a pattern may not
    contain newlines, nor the relative path components "." and "..".
    """
    # Newlines separate entries in the narrowspec file, so a pattern
    # containing one would corrupt the file.
    if _numlines(pat) != 1:
        raise error.Abort(_(b'newlines are not allowed in narrowspec paths'))

    parts = pat.split(b'/')
    if any(p in (b'.', b'..') for p in parts):
        raise error.Abort(
            _(b'"." and ".." are not allowed in narrowspec paths')
        )
74 74
75 75
def normalizepattern(pattern, defaultkind=b'path'):
    """Return the normalized text form of ``pattern``.

    If the pattern carries no kind prefix, ``defaultkind`` is assumed.
    """
    kind, pat = matchmod._patsplit(pattern, defaultkind)
    normkind, normpat = normalizesplitpattern(kind, pat)
    return normkind + b':' + normpat
83 83
84 84
def parsepatterns(pats):
    """Parse an iterable of user-supplied patterns into a typed pattern set.

    Patterns without a prefix are assumed to be ``path:`` patterns.  Only
    a small set of prefixes is allowed, for safety and performance; see
    ``validatepatterns()``.

    Use this on patterns that come from the user, to normalize and
    validate them into the internal representation used for patterns.
    """
    normalized = set()
    for pattern in pats:
        normalized.add(normalizepattern(pattern))
    validatepatterns(normalized)
    return normalized
99 99
100 100
def validatepatterns(pats):
    """Validate that ``pats`` is a set of normalized, prefixed patterns.

    Every entry must begin with ``path:`` or ``rootfilesin:``.

    Use this to validate internal data structures and patterns loaded
    from sources that use the internal, prefixed representation but
    cannot necessarily be fully trusted.
    """
    with util.timedcm('narrowspec.validatepatterns(pats size=%d)', len(pats)):
        if not isinstance(pats, set):
            raise error.ProgrammingError(
                b'narrow patterns should be a set; got %r' % pats
            )

        for pat in pats:
            if pat.startswith(VALID_PREFIXES):
                continue
            # Use a Mercurial exception because this can happen due to user
            # bugs (e.g. manually updating spec file).
            raise error.Abort(
                _(b'invalid prefix on narrow pattern: %s') % pat,
                hint=_(
                    b'narrow patterns must begin with one of '
                    b'the following: %s'
                )
                % b', '.join(VALID_PREFIXES),
            )
129 129
130 130
def format(includes, excludes):
    """Serialize include/exclude pattern sets into narrowspec file text.

    Includes that are also excluded are dropped from the include
    section; each section lists its patterns sorted, one per line.
    """
    lines = [b'[include]']
    lines.extend(sorted(includes - excludes))
    lines.append(b'[exclude]')
    lines.extend(sorted(excludes))
    return b'\n'.join(lines) + b'\n'
139 139
140 140
def match(root, include=None, exclude=None):
    """Build a matcher for the given narrow include/exclude patterns."""
    if include:
        return matchmod.match(
            root, b'', [], include=include, exclude=exclude or []
        )
    # Passing empty include and empty exclude to matchmod.match()
    # gives a matcher that matches everything, so explicitly use
    # the nevermatcher.
    return matchmod.never()
150 150
151 151
def parseconfig(ui, spec):
    """Parse narrowspec text into validated (includes, excludes) sets."""
    # maybe we should care about the profiles returned too
    includepats, excludepats, profiles = sparse.parseconfig(ui, spec, b'narrow')
    if profiles:
        msg = _(
            b"including other spec files using '%include' is not"
            b" supported in narrowspec"
        )
        raise error.Abort(msg)

    for pats in (includepats, excludepats):
        validatepatterns(pats)

    return includepats, excludepats
167 167
168 168
def load(repo):
    """Read and parse the narrowspec from the store.

    A missing narrowspec file is treated the same as a narrowspec file
    that exists but is empty.
    """
    return parseconfig(repo.ui, repo.svfs.tryread(FILENAME))
174 174
175 175
def save(repo, includepats, excludepats):
    """Validate the pattern sets and write them to the store narrowspec."""
    for pats in (includepats, excludepats):
        validatepatterns(pats)
    repo.svfs.write(FILENAME, format(includepats, excludepats))
181 181
182 182
def copytoworkingcopy(repo):
    """Mirror the store narrowspec into the working-copy tracking file."""
    repo.vfs.write(DIRSTATE_FILENAME, repo.svfs.read(FILENAME))
186 186
187 187
def savebackup(repo, backupname):
    """Hard-link a backup of the store narrowspec to ``backupname``.

    No-op for repositories without the narrow requirement.
    """
    if requirements.NARROW_REQUIREMENT not in repo.requirements:
        return
    repo.svfs.tryunlink(backupname)
    util.copyfile(
        repo.svfs.join(FILENAME), repo.svfs.join(backupname), hardlink=True
    )
194 194
195 195
def restorebackup(repo, backupname):
    """Move the backup narrowspec back into place in the store.

    No-op for repositories without the narrow requirement.
    """
    if requirements.NARROW_REQUIREMENT not in repo.requirements:
        return
    svfs = repo.svfs
    util.rename(svfs.join(backupname), svfs.join(FILENAME))
200 200
201 201
def savewcbackup(repo, backupname):
    """Hard-link a backup of the working-copy narrowspec file.

    No-op when narrow is not enabled.  Stale backups are removed first.
    """
    if requirements.NARROW_REQUIREMENT not in repo.requirements:
        return
    vfs = repo.vfs
    vfs.tryunlink(backupname)
    if not vfs.exists(DIRSTATE_FILENAME):
        # It may not exist in old repos
        return
    util.copyfile(
        vfs.join(DIRSTATE_FILENAME), vfs.join(backupname), hardlink=True
    )
212 212
213 213
def restorewcbackup(repo, backupname):
    """Restore the working-copy narrowspec from ``backupname`` if present.

    No-op when narrow is not enabled.
    """
    if requirements.NARROW_REQUIREMENT not in repo.requirements:
        return
    vfs = repo.vfs
    # It may not exist in old repos
    if vfs.exists(backupname):
        util.rename(vfs.join(backupname), vfs.join(DIRSTATE_FILENAME))
220 220
221 221
def clearwcbackup(repo, backupname):
    """Delete a working-copy narrowspec backup, if narrow is enabled."""
    if requirements.NARROW_REQUIREMENT in repo.requirements:
        repo.vfs.tryunlink(backupname)
226 226
227 227
def restrictpatterns(req_includes, req_excludes, repo_includes, repo_excludes):
    r"""Restricts the patterns according to repo settings,
    results in a logical AND operation

    :param req_includes: requested includes
    :param req_excludes: requested excludes
    :param repo_includes: repo includes
    :param repo_excludes: repo excludes
    :return: include patterns, exclude patterns, and invalid include patterns.
    """
    # Excludes always combine: anything excluded on either side stays out.
    res_excludes = set(req_excludes) | set(repo_excludes)
    invalid_includes = []
    if not req_includes:
        # Nothing requested: fall back to everything the repo includes.
        res_includes = set(repo_includes)
    elif b'path:.' in repo_includes:
        # The repo includes everything, so any requested include is valid.
        res_includes = set(req_includes)
    else:
        accepted = []
        for req_include in req_includes:
            req_include = util.expandpath(util.normpath(req_include))
            if req_include in repo_includes:
                accepted.append(req_include)
                continue
            # A request is also valid if it lies strictly beneath one of
            # the repo's includes.
            if any(
                req_include.startswith(repo_include + b'/')
                for repo_include in repo_includes
            ):
                accepted.append(req_include)
            else:
                invalid_includes.append(req_include)
        if accepted:
            res_includes = set(accepted)
        else:
            # No valid include left: exclude everything.  The empty list
            # (not a set) is returned here, matching historical behavior.
            res_includes = accepted
            res_excludes = {b'path:.'}
    return res_includes, res_excludes, invalid_includes
265 265
266 266
267 267 # These two are extracted for extensions (specifically for Google's CitC file
268 268 # system)
def _deletecleanfiles(repo, files):
    """Remove each of ``files`` from the working directory."""
    for path in files:
        repo.wvfs.unlinkpath(path)
272 272
273 273
def _writeaddedfiles(repo, pctx, files):
    """Materialize ``files`` in the working directory via a merge update.

    Files that already exist on disk are left untouched; the rest are
    fetched from the dirstate parent's manifest.
    """
    mresult = merge.mergeresult()
    mf = repo[b'.'].manifest()
    missing = [f for f in files if not repo.wvfs.exists(f)]
    for f in missing:
        mresult.addfile(
            f,
            mergestatemod.ACTION_GET,
            (mf.flags(f), False),
            b"narrowspec updated",
        )
    merge.applyupdates(
        repo,
        mresult,
        wctx=repo[None],
        mctx=repo[b'.'],
        overwrite=False,
        wantfiledata=False,
    )
293 293
294 294
def checkworkingcopynarrowspec(repo):
    """Abort if the working copy's narrowspec is out of sync with the store."""
    # Avoid infinite recursion when updating the working copy
    if getattr(repo, '_updatingnarrowspec', False):
        return
    storespec = repo.svfs.tryread(FILENAME)
    if repo.vfs.tryread(DIRSTATE_FILENAME) != storespec:
        raise error.StateError(
            _(b"working copy's narrowspec is stale"),
            hint=_(b"run 'hg tracked --update-working-copy'"),
        )
306 306
307 307
def updateworkingcopy(repo, assumeclean=False):
    """updates the working copy and dirstate from the store narrowspec

    When assumeclean=True, files that are not known to be clean will also
    be deleted. It is then up to the caller to make sure they are clean.
    """
    # NOTE(review): the previous version of this span carried unresolved
    # diff residue — both the old unguarded ds.status() call and the new
    # running_status-wrapped one.  Resolved to the running_status form.
    oldspec = repo.vfs.tryread(DIRSTATE_FILENAME)
    newspec = repo.svfs.tryread(FILENAME)
    # Guard against checkworkingcopynarrowspec() recursing into us while
    # the two spec files are temporarily out of sync.
    repo._updatingnarrowspec = True

    oldincludes, oldexcludes = parseconfig(repo.ui, oldspec)
    newincludes, newexcludes = parseconfig(repo.ui, newspec)
    oldmatch = match(repo.root, include=oldincludes, exclude=oldexcludes)
    newmatch = match(repo.root, include=newincludes, exclude=newexcludes)
    # Files newly covered by the spec, and files no longer covered.
    addedmatch = matchmod.differencematcher(newmatch, oldmatch)
    removedmatch = matchmod.differencematcher(oldmatch, newmatch)

    # Mutating the dirstate below requires the wlock; fail loudly if a
    # caller forgot to take it.
    assert repo.currentwlock() is not None
    ds = repo.dirstate
    with ds.running_status(repo):
        lookup, status, _mtime_boundary = ds.status(
            removedmatch,
            subrepos=[],
            ignored=True,
            clean=True,
            unknown=True,
        )
    trackeddirty = status.modified + status.added
    clean = status.clean
    if assumeclean:
        # Caller vouches for the files whose cleanliness could not be
        # determined from cached state (the "lookup" set).
        clean.extend(lookup)
    else:
        trackeddirty.extend(lookup)
    _deletecleanfiles(repo, clean)
    uipathfn = scmutil.getuipathfn(repo)
    for f in sorted(trackeddirty):
        repo.ui.status(
            _(b'not deleting possibly dirty file %s\n') % uipathfn(f)
        )
    for f in sorted(status.unknown):
        repo.ui.status(_(b'not deleting unknown file %s\n') % uipathfn(f))
    for f in sorted(status.ignored):
        repo.ui.status(_(b'not deleting ignored file %s\n') % uipathfn(f))
    # Stop tracking everything that was removed from the narrowspec.
    for f in clean + trackeddirty:
        ds.update_file(f, p1_tracked=False, wc_tracked=False)

    pctx = repo[b'.']

    # only update added files that are in the sparse checkout
    addedmatch = matchmod.intersectmatchers(addedmatch, sparse.matcher(repo))
    newfiles = [f for f in pctx.manifest().walk(addedmatch) if f not in ds]
    for f in newfiles:
        ds.update_file(f, p1_tracked=True, wc_tracked=True, possibly_dirty=True)
    _writeaddedfiles(repo, pctx, newfiles)
    repo._updatingnarrowspec = False
General Comments 0
You need to be logged in to leave comments. Login now