repoview: move function for computing filtered hash...
Gregory Szorc
r24723:467a3314 default
@@ -1,460 +1,441 b''
1 1 # branchmap.py - logic to compute, maintain and store the branchmap for a local repo
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import bin, hex, nullid, nullrev
9 9 import encoding
10 import scmutil
10 11 import util
11 12 import time
12 13 from array import array
13 14 from struct import calcsize, pack, unpack
14 15
15 16 def _filename(repo):
16 17 """name of a branchcache file for a given repo or repoview"""
17 18 filename = "cache/branch2"
18 19 if repo.filtername:
19 20 filename = '%s-%s' % (filename, repo.filtername)
20 21 return filename
21 22
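For illustration (a sketch, not part of the commit): this naming scheme yields one branch cache file per repoview filter, e.g.

    cache/branch2            # unfiltered repository (filtername is None)
    cache/branch2-visible    # 'visible' repoview
    cache/branch2-served     # 'served' repoview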
22 23 def read(repo):
23 24 try:
24 25 f = repo.vfs(_filename(repo))
25 26 lines = f.read().split('\n')
26 27 f.close()
27 28 except (IOError, OSError):
28 29 return None
29 30
30 31 try:
31 32 cachekey = lines.pop(0).split(" ", 2)
32 33 last, lrev = cachekey[:2]
33 34 last, lrev = bin(last), int(lrev)
34 35 filteredhash = None
35 36 if len(cachekey) > 2:
36 37 filteredhash = bin(cachekey[2])
37 38 partial = branchcache(tipnode=last, tiprev=lrev,
38 39 filteredhash=filteredhash)
39 40 if not partial.validfor(repo):
40 41 # invalidate the cache
41 42 raise ValueError('tip differs')
42 43 for l in lines:
43 44 if not l:
44 45 continue
45 46 node, state, label = l.split(" ", 2)
46 47 if state not in 'oc':
47 48 raise ValueError('invalid branch state')
48 49 label = encoding.tolocal(label.strip())
49 50 if not node in repo:
50 51 raise ValueError('node %s does not exist' % node)
51 52 node = bin(node)
52 53 partial.setdefault(label, []).append(node)
53 54 if state == 'c':
54 55 partial._closednodes.add(node)
55 56 except KeyboardInterrupt:
56 57 raise
57 58 except Exception, inst:
58 59 if repo.ui.debugflag:
59 60 msg = 'invalid branchheads cache'
60 61 if repo.filtername is not None:
61 62 msg += ' (%s)' % repo.filtername
62 63 msg += ': %s\n'
63 64 repo.ui.debug(msg % inst)
64 65 partial = None
65 66 return partial
66 67
67 68 ### Nearest subset relation
68 69 # Nearest subset of filter X is a filter Y so that:
69 70 # * Y is included in X,
70 71 # * X - Y is as small as possible.
71 72 # This creates an ordering used for branchmap purposes.
72 73 # The ordering may be partial.
73 74 subsettable = {None: 'visible',
74 75 'visible': 'served',
75 76 'served': 'immutable',
76 77 'immutable': 'base'}
77 78
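As a minimal sketch (not part of the commit) of what this ordering gives a consumer: updatecache below takes a single step through this table and recurses via subset.branchmap(); flattening that walk makes the chain visible. subsettable here is the dict defined above:

    def subsetchain(filtername):
        '''yield successively smaller subset filter names (sketch)'''
        name = subsettable.get(filtername)
        while name is not None:
            yield name
            name = subsettable.get(name)

    list(subsetchain(None))   # ['visible', 'served', 'immutable', 'base']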
78 79 def updatecache(repo):
79 80 cl = repo.changelog
80 81 filtername = repo.filtername
81 82 partial = repo._branchcaches.get(filtername)
82 83
83 84 revs = []
84 85 if partial is None or not partial.validfor(repo):
85 86 partial = read(repo)
86 87 if partial is None:
87 88 subsetname = subsettable.get(filtername)
88 89 if subsetname is None:
89 90 partial = branchcache()
90 91 else:
91 92 subset = repo.filtered(subsetname)
92 93 partial = subset.branchmap().copy()
93 94 extrarevs = subset.changelog.filteredrevs - cl.filteredrevs
94 95 revs.extend(r for r in extrarevs if r <= partial.tiprev)
95 96 revs.extend(cl.revs(start=partial.tiprev + 1))
96 97 if revs:
97 98 partial.update(repo, revs)
98 99 partial.write(repo)
99 100
100 101 assert partial.validfor(repo), filtername
101 102 repo._branchcaches[repo.filtername] = partial
102 103
103 104 class branchcache(dict):
104 105 """A dict like object that hold branches heads cache.
105 106
106 107 This cache is used to avoid costly computations to determine all the
107 108 branch heads of a repo.
108 109
109 110 The cache is serialized on disk in the following format:
110 111
111 112 <tip hex node> <tip rev number> [optional filtered repo hex hash]
112 113 <branch head hex node> <open/closed state> <branch name>
113 114 <branch head hex node> <open/closed state> <branch name>
114 115 ...
115 116
116 117 The first line is used to check if the cache is still valid. If the
117 118 branch cache is for a filtered repo view, an optional third hash is
118 119 included that hashes the hashes of all filtered revisions.
119 120
120 121 The open/closed state is represented by a single letter 'o' or 'c'.
121 122 This field can be used to avoid changelog reads when determining if a
122 123 branch head closes a branch or not.
123 124 """
124 125
125 126 def __init__(self, entries=(), tipnode=nullid, tiprev=nullrev,
126 127 filteredhash=None, closednodes=None):
127 128 super(branchcache, self).__init__(entries)
128 129 self.tipnode = tipnode
129 130 self.tiprev = tiprev
130 131 self.filteredhash = filteredhash
131 132 # closednodes is a set of nodes that close their branch. If the branch
132 133 # cache has been updated, it may contain nodes that are no longer
133 134 # heads.
134 135 if closednodes is None:
135 136 self._closednodes = set()
136 137 else:
137 138 self._closednodes = closednodes
138 139
139 def _hashfiltered(self, repo):
140 """build hash of revision filtered in the current cache
141
142 Tracking tipnode and tiprev is not enough to ensure validity of the
143 cache as they do not help to distinct cache that ignored various
144 revision bellow tiprev.
145
146 To detect such difference, we build a cache of all ignored revisions.
147 """
148 cl = repo.changelog
149 if not cl.filteredrevs:
150 return None
151 key = None
152 revs = sorted(r for r in cl.filteredrevs if r <= self.tiprev)
153 if revs:
154 s = util.sha1()
155 for rev in revs:
156 s.update('%s;' % rev)
157 key = s.digest()
158 return key
159
160 140 def validfor(self, repo):
161 141 """Is the cache content valid regarding a repo
162 142
163 143 - False when cached tipnode is unknown or if we detect a strip.
164 144 - True when cache is up to date or a subset of current repo."""
165 145 try:
166 146 return ((self.tipnode == repo.changelog.node(self.tiprev))
167 and (self.filteredhash == self._hashfiltered(repo)))
147 and (self.filteredhash == \
148 scmutil.filteredhash(repo, self.tiprev)))
168 149 except IndexError:
169 150 return False
170 151
171 152 def _branchtip(self, heads):
172 153 '''Return a tuple of the last open head in heads and False;
173 154 otherwise return the last closed head and True.'''
174 155 tip = heads[-1]
175 156 closed = True
176 157 for h in reversed(heads):
177 158 if h not in self._closednodes:
178 159 tip = h
179 160 closed = False
180 161 break
181 162 return tip, closed
182 163
183 164 def branchtip(self, branch):
184 165 '''Return the tipmost open head on branch; otherwise return the
185 166 tipmost closed head on branch.
186 167 Raise KeyError for an unknown branch.'''
187 168 return self._branchtip(self[branch])[0]
188 169
189 170 def branchheads(self, branch, closed=False):
190 171 heads = self[branch]
191 172 if not closed:
192 173 heads = [h for h in heads if h not in self._closednodes]
193 174 return heads
194 175
195 176 def iterbranches(self):
196 177 for bn, heads in self.iteritems():
197 178 yield (bn, heads) + self._branchtip(heads)
198 179
199 180 def copy(self):
200 181 """return an deep copy of the branchcache object"""
201 182 return branchcache(self, self.tipnode, self.tiprev, self.filteredhash,
202 183 self._closednodes)
203 184
204 185 def write(self, repo):
205 186 try:
206 187 f = repo.vfs(_filename(repo), "w", atomictemp=True)
207 188 cachekey = [hex(self.tipnode), str(self.tiprev)]
208 189 if self.filteredhash is not None:
209 190 cachekey.append(hex(self.filteredhash))
210 191 f.write(" ".join(cachekey) + '\n')
211 192 nodecount = 0
212 193 for label, nodes in sorted(self.iteritems()):
213 194 for node in nodes:
214 195 nodecount += 1
215 196 if node in self._closednodes:
216 197 state = 'c'
217 198 else:
218 199 state = 'o'
219 200 f.write("%s %s %s\n" % (hex(node), state,
220 201 encoding.fromlocal(label)))
221 202 f.close()
222 203 repo.ui.log('branchcache',
223 204 'wrote %s branch cache with %d labels and %d nodes\n',
224 205 repo.filtername, len(self), nodecount)
225 206 except (IOError, OSError, util.Abort), inst:
226 207 repo.ui.debug("couldn't write branch cache: %s\n" % inst)
227 208 # Abort may be raised by a read-only opener
228 209 pass
229 210
230 211 def update(self, repo, revgen):
231 212 """Given a branchhead cache, self, that may have extra nodes or be
232 213 missing heads, and a generator of nodes that are strictly a superset of
233 214 the missing heads, this function updates self to be correct.
234 215 """
235 216 starttime = time.time()
236 217 cl = repo.changelog
237 218 # collect new branch entries
238 219 newbranches = {}
239 220 getbranchinfo = repo.revbranchcache().branchinfo
240 221 for r in revgen:
241 222 branch, closesbranch = getbranchinfo(r)
242 223 newbranches.setdefault(branch, []).append(r)
243 224 if closesbranch:
244 225 self._closednodes.add(cl.node(r))
245 226
246 227 # fetch current topological heads to speed up filtering
247 228 topoheads = set(cl.headrevs())
248 229
249 230 # if older branchheads are reachable from new ones, they aren't
250 231 # really branchheads. Note checking parents is insufficient:
251 232 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
252 233 for branch, newheadrevs in newbranches.iteritems():
253 234 bheads = self.setdefault(branch, [])
254 235 bheadset = set(cl.rev(node) for node in bheads)
255 236
256 237 # This has been tested True on all internal usage of this function;
257 238 # run it again in case of doubt
258 239 # assert not (set(bheadrevs) & set(newheadrevs))
259 240 newheadrevs.sort()
260 241 bheadset.update(newheadrevs)
261 242
262 243 # This prunes out two kinds of heads - heads that are superseded by
263 244 # a head in newheadrevs, and newheadrevs that are not heads because
264 245 # an existing head is their descendant.
265 246 uncertain = bheadset - topoheads
266 247 if uncertain:
267 248 floorrev = min(uncertain)
268 249 ancestors = set(cl.ancestors(newheadrevs, floorrev))
269 250 bheadset -= ancestors
270 251 bheadrevs = sorted(bheadset)
271 252 self[branch] = [cl.node(rev) for rev in bheadrevs]
272 253 tiprev = bheadrevs[-1]
273 254 if tiprev > self.tiprev:
274 255 self.tipnode = cl.node(tiprev)
275 256 self.tiprev = tiprev
276 257
277 258 if not self.validfor(repo):
278 259 # cache key is not valid anymore
279 260 self.tipnode = nullid
280 261 self.tiprev = nullrev
281 262 for heads in self.values():
282 263 tiprev = max(cl.rev(node) for node in heads)
283 264 if tiprev > self.tiprev:
284 265 self.tipnode = cl.node(tiprev)
285 266 self.tiprev = tiprev
286 self.filteredhash = self._hashfiltered(repo)
267 self.filteredhash = scmutil.filteredhash(repo, self.tiprev)
287 268
288 269 duration = time.time() - starttime
289 270 repo.ui.log('branchcache', 'updated %s branch cache in %.4f seconds\n',
290 271 repo.filtername, duration)
291 272
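To make the pruning above concrete, a worked sketch (made-up revision numbers) for the graph 1 (branch a) -> 2 (branch b) -> 3 (branch a) from the comment:

    bheadset = set([1, 3])            # candidate heads for branch 'a'
    topoheads = set([3])              # rev 3 is the only topological head
    uncertain = bheadset - topoheads  # set([1])
    ancestors = set([1, 2])           # what cl.ancestors([3], 1) yields here
    bheadset -= ancestors             # set([3]): rev 1 is pruned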
292 273 # Revision branch info cache
293 274
294 275 _rbcversion = '-v1'
295 276 _rbcnames = 'cache/rbc-names' + _rbcversion
296 277 _rbcrevs = 'cache/rbc-revs' + _rbcversion
297 278 # [4 byte hash prefix][4 byte branch name number with sign bit indicating open]
298 279 _rbcrecfmt = '>4sI'
299 280 _rbcrecsize = calcsize(_rbcrecfmt)
300 281 _rbcnodelen = 4
301 282 _rbcbranchidxmask = 0x7fffffff
302 283 _rbccloseflag = 0x80000000
303 284
304 285 class revbranchcache(object):
305 286 """Persistent cache, mapping from revision number to branch name and close.
306 287 This is a low level cache, independent of filtering.
307 288
308 289 Branch names are stored in rbc-names in internal encoding separated by 0.
309 290 rbc-names is append-only, and each branch name is only stored once and will
310 291 thus have a unique index.
311 292
312 293 The branch info for each revision is stored in rbc-revs as constant size
313 294 records. The whole file is read into memory, but it is only 'parsed' on
314 295 demand. The file is usually append-only but will be truncated if repo
315 296 modification is detected.
316 297 The record for each revision contains the first 4 bytes of the
317 298 corresponding node hash, and the record is only used if it still matches.
318 299 Even a completely trashed rbc-revs file will thus still give the right result
319 300 while converging towards full recovery ... assuming no incorrectly matching
320 301 node hashes.
321 302 The record also contains 4 bytes where 31 bits contain the index of the
322 303 branch and the last bit indicates a branch-closing commit.
323 304 The usage pattern for rbc-revs is thus somewhat similar to 00changelog.i
324 305 and will grow with it but be 1/8th of its size.
325 306 """
326 307
327 308 def __init__(self, repo, readonly=True):
328 309 assert repo.filtername is None
329 310 self._repo = repo
330 311 self._names = [] # branch names in local encoding with static index
331 312 self._rbcrevs = array('c') # structs of type _rbcrecfmt
332 313 self._rbcsnameslen = 0
333 314 try:
334 315 bndata = repo.vfs.read(_rbcnames)
335 316 self._rbcsnameslen = len(bndata) # for verification before writing
336 317 self._names = [encoding.tolocal(bn) for bn in bndata.split('\0')]
337 318 except (IOError, OSError), inst:
338 319 repo.ui.debug("couldn't read revision branch cache names: %s\n" %
339 320 inst)
340 321 if readonly:
341 322 # don't try to use cache - fall back to the slow path
342 323 self.branchinfo = self._branchinfo
343 324
344 325 if self._names:
345 326 try:
346 327 data = repo.vfs.read(_rbcrevs)
347 328 self._rbcrevs.fromstring(data)
348 329 except (IOError, OSError), inst:
349 330 repo.ui.debug("couldn't read revision branch cache: %s\n" %
350 331 inst)
351 332 # remember number of good records on disk
352 333 self._rbcrevslen = min(len(self._rbcrevs) // _rbcrecsize,
353 334 len(repo.changelog))
354 335 if self._rbcrevslen == 0:
355 336 self._names = []
356 337 self._rbcnamescount = len(self._names) # number of good names on disk
357 338 self._namesreverse = dict((b, r) for r, b in enumerate(self._names))
358 339
359 340 def branchinfo(self, rev):
360 341 """Return branch name and close flag for rev, using and updating
361 342 persistent cache."""
362 343 changelog = self._repo.changelog
363 344 rbcrevidx = rev * _rbcrecsize
364 345
365 346 # if requested rev is missing, add and populate all missing revs
366 347 if len(self._rbcrevs) < rbcrevidx + _rbcrecsize:
367 348 self._rbcrevs.extend('\0' * (len(changelog) * _rbcrecsize -
368 349 len(self._rbcrevs)))
369 350
370 351 # fast path: extract data from cache, use it if node is matching
371 352 reponode = changelog.node(rev)[:_rbcnodelen]
372 353 cachenode, branchidx = unpack(
373 354 _rbcrecfmt, buffer(self._rbcrevs, rbcrevidx, _rbcrecsize))
374 355 close = bool(branchidx & _rbccloseflag)
375 356 if close:
376 357 branchidx &= _rbcbranchidxmask
377 358 if cachenode == '\0\0\0\0':
378 359 pass
379 360 elif cachenode == reponode:
380 361 return self._names[branchidx], close
381 362 else:
382 363 # rev/node map has changed, invalidate the cache from here up
383 364 truncate = rbcrevidx + _rbcrecsize
384 365 del self._rbcrevs[truncate:]
385 366 self._rbcrevslen = min(self._rbcrevslen, truncate)
386 367
387 368 # fall back to slow path and make sure it will be written to disk
388 369 return self._branchinfo(rev)
389 370
390 371 def _branchinfo(self, rev):
391 372 """Retrieve branch info from changelog and update _rbcrevs"""
392 373 changelog = self._repo.changelog
393 374 b, close = changelog.branchinfo(rev)
394 375 if b in self._namesreverse:
395 376 branchidx = self._namesreverse[b]
396 377 else:
397 378 branchidx = len(self._names)
398 379 self._names.append(b)
399 380 self._namesreverse[b] = branchidx
400 381 reponode = changelog.node(rev)
401 382 if close:
402 383 branchidx |= _rbccloseflag
403 384 self._setcachedata(rev, reponode, branchidx)
404 385 return b, close
405 386
406 387 def _setcachedata(self, rev, node, branchidx):
407 388 """Writes the node's branch data to the in-memory cache data."""
408 389 rbcrevidx = rev * _rbcrecsize
409 390 rec = array('c')
410 391 rec.fromstring(pack(_rbcrecfmt, node, branchidx))
411 392 self._rbcrevs[rbcrevidx:rbcrevidx + _rbcrecsize] = rec
412 393 self._rbcrevslen = min(self._rbcrevslen, rev)
413 394
414 395 tr = self._repo.currenttransaction()
415 396 if tr:
416 397 tr.addfinalize('write-revbranchcache', self.write)
417 398
418 399 def write(self, tr=None):
419 400 """Save branch cache if it is dirty."""
420 401 repo = self._repo
421 402 if self._rbcnamescount < len(self._names):
422 403 try:
423 404 if self._rbcnamescount != 0:
424 405 f = repo.vfs.open(_rbcnames, 'ab')
425 406 if f.tell() == self._rbcsnameslen:
426 407 f.write('\0')
427 408 else:
428 409 f.close()
429 410 repo.ui.debug("%s changed - rewriting it\n" % _rbcnames)
430 411 self._rbcnamescount = 0
431 412 self._rbcrevslen = 0
432 413 if self._rbcnamescount == 0:
433 414 f = repo.vfs.open(_rbcnames, 'wb')
434 415 f.write('\0'.join(encoding.fromlocal(b)
435 416 for b in self._names[self._rbcnamescount:]))
436 417 self._rbcsnameslen = f.tell()
437 418 f.close()
438 419 except (IOError, OSError, util.Abort), inst:
439 420 repo.ui.debug("couldn't write revision branch cache names: "
440 421 "%s\n" % inst)
441 422 return
442 423 self._rbcnamescount = len(self._names)
443 424
444 425 start = self._rbcrevslen * _rbcrecsize
445 426 if start != len(self._rbcrevs):
446 427 revs = min(len(repo.changelog), len(self._rbcrevs) // _rbcrecsize)
447 428 try:
448 429 f = repo.vfs.open(_rbcrevs, 'ab')
449 430 if f.tell() != start:
450 431 repo.ui.debug("truncating %s to %s\n" % (_rbcrevs, start))
451 432 f.seek(start)
452 433 f.truncate()
453 434 end = revs * _rbcrecsize
454 435 f.write(self._rbcrevs[start:end])
455 436 f.close()
456 437 except (IOError, OSError, util.Abort), inst:
457 438 repo.ui.debug("couldn't write revision branch cache: %s\n" %
458 439 inst)
459 440 return
460 441 self._rbcrevslen = revs
@@ -1,1110 +1,1134 b''
1 1 # scmutil.py - Mercurial core utility functions
2 2 #
3 3 # Copyright Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from i18n import _
9 9 from mercurial.node import nullrev
10 10 import util, error, osutil, revset, similar, encoding, phases
11 11 import pathutil
12 12 import match as matchmod
13 13 import os, errno, re, glob, tempfile, shutil, stat
14 14
15 15 if os.name == 'nt':
16 16 import scmwindows as scmplatform
17 17 else:
18 18 import scmposix as scmplatform
19 19
20 20 systemrcpath = scmplatform.systemrcpath
21 21 userrcpath = scmplatform.userrcpath
22 22
23 23 class status(tuple):
24 24 '''Named tuple with a list of files per status. The 'deleted', 'unknown'
25 25 and 'ignored' properties are only relevant to the working copy.
26 26 '''
27 27
28 28 __slots__ = ()
29 29
30 30 def __new__(cls, modified, added, removed, deleted, unknown, ignored,
31 31 clean):
32 32 return tuple.__new__(cls, (modified, added, removed, deleted, unknown,
33 33 ignored, clean))
34 34
35 35 @property
36 36 def modified(self):
37 37 '''files that have been modified'''
38 38 return self[0]
39 39
40 40 @property
41 41 def added(self):
42 42 '''files that have been added'''
43 43 return self[1]
44 44
45 45 @property
46 46 def removed(self):
47 47 '''files that have been removed'''
48 48 return self[2]
49 49
50 50 @property
51 51 def deleted(self):
52 52 '''files that are in the dirstate, but have been deleted from the
53 53 working copy (aka "missing")
54 54 '''
55 55 return self[3]
56 56
57 57 @property
58 58 def unknown(self):
59 59 '''files not in the dirstate that are not ignored'''
60 60 return self[4]
61 61
62 62 @property
63 63 def ignored(self):
64 64 '''files not in the dirstate that are ignored (by _dirignore())'''
65 65 return self[5]
66 66
67 67 @property
68 68 def clean(self):
69 69 '''files that have not been modified'''
70 70 return self[6]
71 71
72 72 def __repr__(self, *args, **kwargs):
73 73 return (('<status modified=%r, added=%r, removed=%r, deleted=%r, '
74 74 'unknown=%r, ignored=%r, clean=%r>') % self)
75 75
76 76 def itersubrepos(ctx1, ctx2):
77 77 """find subrepos in ctx1 or ctx2"""
78 78 # Create a (subpath, ctx) mapping where we prefer subpaths from
79 79 # ctx1. The subpaths from ctx2 are important when the .hgsub file
80 80 # has been modified (in ctx2) but not yet committed (in ctx1).
81 81 subpaths = dict.fromkeys(ctx2.substate, ctx2)
82 82 subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
83 83 for subpath, ctx in sorted(subpaths.iteritems()):
84 84 yield subpath, ctx.sub(subpath)
85 85
86 86 def nochangesfound(ui, repo, excluded=None):
86 87 '''Report no changes for push/pull; excluded is None or a list of
88 88 nodes excluded from the push/pull.
89 89 '''
90 90 secretlist = []
91 91 if excluded:
92 92 for n in excluded:
93 93 if n not in repo:
94 94 # discovery should not have included the filtered revision,
95 95 # we have to explicitly exclude it until discovery is cleaned up.
96 96 continue
97 97 ctx = repo[n]
98 98 if ctx.phase() >= phases.secret and not ctx.extinct():
99 99 secretlist.append(n)
100 100
101 101 if secretlist:
102 102 ui.status(_("no changes found (ignored %d secret changesets)\n")
103 103 % len(secretlist))
104 104 else:
105 105 ui.status(_("no changes found\n"))
106 106
107 107 def checknewlabel(repo, lbl, kind):
108 108 # Do not use the "kind" parameter in ui output.
109 109 # It makes strings difficult to translate.
110 110 if lbl in ['tip', '.', 'null']:
111 111 raise util.Abort(_("the name '%s' is reserved") % lbl)
112 112 for c in (':', '\0', '\n', '\r'):
113 113 if c in lbl:
114 114 raise util.Abort(_("%r cannot be used in a name") % c)
115 115 try:
116 116 int(lbl)
117 117 raise util.Abort(_("cannot use an integer as a name"))
118 118 except ValueError:
119 119 pass
120 120
121 121 def checkfilename(f):
122 122 '''Check that the filename f is an acceptable filename for a tracked file'''
123 123 if '\r' in f or '\n' in f:
124 124 raise util.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)
125 125
126 126 def checkportable(ui, f):
127 127 '''Check if filename f is portable and warn or abort depending on config'''
128 128 checkfilename(f)
129 129 abort, warn = checkportabilityalert(ui)
130 130 if abort or warn:
131 131 msg = util.checkwinfilename(f)
132 132 if msg:
133 133 msg = "%s: %r" % (msg, f)
134 134 if abort:
135 135 raise util.Abort(msg)
136 136 ui.warn(_("warning: %s\n") % msg)
137 137
138 138 def checkportabilityalert(ui):
139 139 '''check if the user's config requests nothing, a warning, or abort for
140 140 non-portable filenames'''
141 141 val = ui.config('ui', 'portablefilenames', 'warn')
142 142 lval = val.lower()
143 143 bval = util.parsebool(val)
144 144 abort = os.name == 'nt' or lval == 'abort'
145 145 warn = bval or lval == 'warn'
146 146 if bval is None and not (warn or abort or lval == 'ignore'):
147 147 raise error.ConfigError(
148 148 _("ui.portablefilenames value is invalid ('%s')") % val)
149 149 return abort, warn
150 150
151 151 class casecollisionauditor(object):
152 152 def __init__(self, ui, abort, dirstate):
153 153 self._ui = ui
154 154 self._abort = abort
155 155 allfiles = '\0'.join(dirstate._map)
156 156 self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
157 157 self._dirstate = dirstate
158 158 # The purpose of _newfiles is so that we don't complain about
159 159 # case collisions if someone were to call this object with the
160 160 # same filename twice.
161 161 self._newfiles = set()
162 162
163 163 def __call__(self, f):
164 164 if f in self._newfiles:
165 165 return
166 166 fl = encoding.lower(f)
167 167 if fl in self._loweredfiles and f not in self._dirstate:
168 168 msg = _('possible case-folding collision for %s') % f
169 169 if self._abort:
170 170 raise util.Abort(msg)
171 171 self._ui.warn(_("warning: %s\n") % msg)
172 172 self._loweredfiles.add(fl)
173 173 self._newfiles.add(f)
174 174
175 def filteredhash(repo, maxrev):
176 """build hash of filtered revisions in the current repoview.
177
178 Multiple caches perform up-to-date validation by checking that the
179 tiprev and tipnode stored in the cache file match the current repository.
180 However, this is not sufficient for validating repoviews because the set
181 of revisions in the view may change without the repository tiprev and
182 tipnode changing.
183
184 This function hashes all the revs filtered from the view and returns
185 that SHA-1 digest.
186 """
187 cl = repo.changelog
188 if not cl.filteredrevs:
189 return None
190 key = None
191 revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
192 if revs:
193 s = util.sha1()
194 for rev in revs:
195 s.update('%s;' % rev)
196 key = s.digest()
197 return key
198
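Standalone, the hashing scheme above amounts to the following sketch (stdlib hashlib is assumed to stand in for util.sha1):

    import hashlib

    def filteredhash_sketch(filteredrevs, maxrev):
        revs = sorted(r for r in filteredrevs if r <= maxrev)
        if not revs:
            return None
        s = hashlib.sha1()
        for rev in revs:
            s.update('%s;' % rev)
        return s.digest()

    filteredhash_sketch(set([2, 5, 9]), 7)   # hashes '2;5;', ignoring rev 9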
175 199 class abstractvfs(object):
176 200 """Abstract base class; cannot be instantiated"""
177 201
178 202 def __init__(self, *args, **kwargs):
179 203 '''Prevent instantiation; don't call this from subclasses.'''
180 204 raise NotImplementedError('attempted instantiating ' + str(type(self)))
181 205
182 206 def tryread(self, path):
183 207 '''gracefully return an empty string for missing files'''
184 208 try:
185 209 return self.read(path)
186 210 except IOError, inst:
187 211 if inst.errno != errno.ENOENT:
188 212 raise
189 213 return ""
190 214
191 215 def tryreadlines(self, path, mode='rb'):
192 216 '''gracefully return an empty array for missing files'''
193 217 try:
194 218 return self.readlines(path, mode=mode)
195 219 except IOError, inst:
196 220 if inst.errno != errno.ENOENT:
197 221 raise
198 222 return []
199 223
200 224 def open(self, path, mode="r", text=False, atomictemp=False,
201 225 notindexed=False):
202 226 '''Open ``path`` file, which is relative to vfs root.
203 227
204 228 Newly created directories are marked as "not to be indexed by
205 229 the content indexing service", if ``notindexed`` is specified
206 230 for "write" mode access.
207 231 '''
208 232 self.open = self.__call__
209 233 return self.__call__(path, mode, text, atomictemp, notindexed)
210 234
211 235 def read(self, path):
212 236 fp = self(path, 'rb')
213 237 try:
214 238 return fp.read()
215 239 finally:
216 240 fp.close()
217 241
218 242 def readlines(self, path, mode='rb'):
219 243 fp = self(path, mode=mode)
220 244 try:
221 245 return fp.readlines()
222 246 finally:
223 247 fp.close()
224 248
225 249 def write(self, path, data):
226 250 fp = self(path, 'wb')
227 251 try:
228 252 return fp.write(data)
229 253 finally:
230 254 fp.close()
231 255
232 256 def writelines(self, path, data, mode='wb', notindexed=False):
233 257 fp = self(path, mode=mode, notindexed=notindexed)
234 258 try:
235 259 return fp.writelines(data)
236 260 finally:
237 261 fp.close()
238 262
239 263 def append(self, path, data):
240 264 fp = self(path, 'ab')
241 265 try:
242 266 return fp.write(data)
243 267 finally:
244 268 fp.close()
245 269
246 270 def chmod(self, path, mode):
247 271 return os.chmod(self.join(path), mode)
248 272
249 273 def exists(self, path=None):
250 274 return os.path.exists(self.join(path))
251 275
252 276 def fstat(self, fp):
253 277 return util.fstat(fp)
254 278
255 279 def isdir(self, path=None):
256 280 return os.path.isdir(self.join(path))
257 281
258 282 def isfile(self, path=None):
259 283 return os.path.isfile(self.join(path))
260 284
261 285 def islink(self, path=None):
262 286 return os.path.islink(self.join(path))
263 287
264 288 def reljoin(self, *paths):
265 289 """join various elements of a path together (as os.path.join would do)
266 290
267 291 The vfs base is not injected so that paths stay relative. This exists
268 292 to allow handling of strange encoding if needed."""
269 293 return os.path.join(*paths)
270 294
271 295 def split(self, path):
272 296 """split top-most element of a path (as os.path.split would do)
273 297
274 298 This exists to allow handling of strange encoding if needed."""
275 299 return os.path.split(path)
276 300
277 301 def lexists(self, path=None):
278 302 return os.path.lexists(self.join(path))
279 303
280 304 def lstat(self, path=None):
281 305 return os.lstat(self.join(path))
282 306
283 307 def listdir(self, path=None):
284 308 return os.listdir(self.join(path))
285 309
286 310 def makedir(self, path=None, notindexed=True):
287 311 return util.makedir(self.join(path), notindexed)
288 312
289 313 def makedirs(self, path=None, mode=None):
290 314 return util.makedirs(self.join(path), mode)
291 315
292 316 def makelock(self, info, path):
293 317 return util.makelock(info, self.join(path))
294 318
295 319 def mkdir(self, path=None):
296 320 return os.mkdir(self.join(path))
297 321
298 322 def mkstemp(self, suffix='', prefix='tmp', dir=None, text=False):
299 323 fd, name = tempfile.mkstemp(suffix=suffix, prefix=prefix,
300 324 dir=self.join(dir), text=text)
301 325 dname, fname = util.split(name)
302 326 if dir:
303 327 return fd, os.path.join(dir, fname)
304 328 else:
305 329 return fd, fname
306 330
307 331 def readdir(self, path=None, stat=None, skip=None):
308 332 return osutil.listdir(self.join(path), stat, skip)
309 333
310 334 def readlock(self, path):
311 335 return util.readlock(self.join(path))
312 336
313 337 def rename(self, src, dst):
314 338 return util.rename(self.join(src), self.join(dst))
315 339
316 340 def readlink(self, path):
317 341 return os.readlink(self.join(path))
318 342
319 343 def removedirs(self, path=None):
320 344 """Remove a leaf directory and all empty intermediate ones
321 345 """
322 346 return util.removedirs(self.join(path))
323 347
324 348 def rmtree(self, path=None, ignore_errors=False, forcibly=False):
325 349 """Remove a directory tree recursively
326 350
327 351 If ``forcibly``, this tries to remove READ-ONLY files, too.
328 352 """
329 353 if forcibly:
330 354 def onerror(function, path, excinfo):
331 355 if function is not os.remove:
332 356 raise
333 357 # read-only files cannot be unlinked under Windows
334 358 s = os.stat(path)
335 359 if (s.st_mode & stat.S_IWRITE) != 0:
336 360 raise
337 361 os.chmod(path, stat.S_IMODE(s.st_mode) | stat.S_IWRITE)
338 362 os.remove(path)
339 363 else:
340 364 onerror = None
341 365 return shutil.rmtree(self.join(path),
342 366 ignore_errors=ignore_errors, onerror=onerror)
343 367
344 368 def setflags(self, path, l, x):
345 369 return util.setflags(self.join(path), l, x)
346 370
347 371 def stat(self, path=None):
348 372 return os.stat(self.join(path))
349 373
350 374 def unlink(self, path=None):
351 375 return util.unlink(self.join(path))
352 376
353 377 def unlinkpath(self, path=None, ignoremissing=False):
354 378 return util.unlinkpath(self.join(path), ignoremissing)
355 379
356 380 def utime(self, path=None, t=None):
357 381 return os.utime(self.join(path), t)
358 382
359 383 class vfs(abstractvfs):
360 384 '''Operate files relative to a base directory
361 385
362 386 This class is used to hide the details of COW semantics and
363 387 remote file access from higher level code.
364 388 '''
365 389 def __init__(self, base, audit=True, expandpath=False, realpath=False):
366 390 if expandpath:
367 391 base = util.expandpath(base)
368 392 if realpath:
369 393 base = os.path.realpath(base)
370 394 self.base = base
371 395 self._setmustaudit(audit)
372 396 self.createmode = None
373 397 self._trustnlink = None
374 398
375 399 def _getmustaudit(self):
376 400 return self._audit
377 401
378 402 def _setmustaudit(self, onoff):
379 403 self._audit = onoff
380 404 if onoff:
381 405 self.audit = pathutil.pathauditor(self.base)
382 406 else:
383 407 self.audit = util.always
384 408
385 409 mustaudit = property(_getmustaudit, _setmustaudit)
386 410
387 411 @util.propertycache
388 412 def _cansymlink(self):
389 413 return util.checklink(self.base)
390 414
391 415 @util.propertycache
392 416 def _chmod(self):
393 417 return util.checkexec(self.base)
394 418
395 419 def _fixfilemode(self, name):
396 420 if self.createmode is None or not self._chmod:
397 421 return
398 422 os.chmod(name, self.createmode & 0666)
399 423
400 424 def __call__(self, path, mode="r", text=False, atomictemp=False,
401 425 notindexed=False):
402 426 '''Open ``path`` file, which is relative to vfs root.
403 427
404 428 Newly created directories are marked as "not to be indexed by
405 429 the content indexing service", if ``notindexed`` is specified
406 430 for "write" mode access.
407 431 '''
408 432 if self._audit:
409 433 r = util.checkosfilename(path)
410 434 if r:
411 435 raise util.Abort("%s: %r" % (r, path))
412 436 self.audit(path)
413 437 f = self.join(path)
414 438
415 439 if not text and "b" not in mode:
416 440 mode += "b" # for that other OS
417 441
418 442 nlink = -1
419 443 if mode not in ('r', 'rb'):
420 444 dirname, basename = util.split(f)
421 445 # If basename is empty, then the path is malformed because it points
422 446 # to a directory. Let the posixfile() call below raise IOError.
423 447 if basename:
424 448 if atomictemp:
425 449 util.ensuredirs(dirname, self.createmode, notindexed)
426 450 return util.atomictempfile(f, mode, self.createmode)
427 451 try:
428 452 if 'w' in mode:
429 453 util.unlink(f)
430 454 nlink = 0
431 455 else:
432 456 # nlinks() may behave differently for files on Windows
433 457 # shares if the file is open.
434 458 fd = util.posixfile(f)
435 459 nlink = util.nlinks(f)
436 460 if nlink < 1:
437 461 nlink = 2 # force mktempcopy (issue1922)
438 462 fd.close()
439 463 except (OSError, IOError), e:
440 464 if e.errno != errno.ENOENT:
441 465 raise
442 466 nlink = 0
443 467 util.ensuredirs(dirname, self.createmode, notindexed)
444 468 if nlink > 0:
445 469 if self._trustnlink is None:
446 470 self._trustnlink = nlink > 1 or util.checknlink(f)
447 471 if nlink > 1 or not self._trustnlink:
448 472 util.rename(util.mktempcopy(f), f)
449 473 fp = util.posixfile(f, mode)
450 474 if nlink == 0:
451 475 self._fixfilemode(f)
452 476 return fp
453 477
454 478 def symlink(self, src, dst):
455 479 self.audit(dst)
456 480 linkname = self.join(dst)
457 481 try:
458 482 os.unlink(linkname)
459 483 except OSError:
460 484 pass
461 485
462 486 util.ensuredirs(os.path.dirname(linkname), self.createmode)
463 487
464 488 if self._cansymlink:
465 489 try:
466 490 os.symlink(src, linkname)
467 491 except OSError, err:
468 492 raise OSError(err.errno, _('could not symlink to %r: %s') %
469 493 (src, err.strerror), linkname)
470 494 else:
471 495 self.write(dst, src)
472 496
473 497 def join(self, path, *insidef):
474 498 if path:
475 499 return os.path.join(self.base, path, *insidef)
476 500 else:
477 501 return self.base
478 502
479 503 opener = vfs
480 504
481 505 class auditvfs(object):
482 506 def __init__(self, vfs):
483 507 self.vfs = vfs
484 508
485 509 def _getmustaudit(self):
486 510 return self.vfs.mustaudit
487 511
488 512 def _setmustaudit(self, onoff):
489 513 self.vfs.mustaudit = onoff
490 514
491 515 mustaudit = property(_getmustaudit, _setmustaudit)
492 516
493 517 class filtervfs(abstractvfs, auditvfs):
494 518 '''Wrapper vfs for filtering filenames with a function.'''
495 519
496 520 def __init__(self, vfs, filter):
497 521 auditvfs.__init__(self, vfs)
498 522 self._filter = filter
499 523
500 524 def __call__(self, path, *args, **kwargs):
501 525 return self.vfs(self._filter(path), *args, **kwargs)
502 526
503 527 def join(self, path, *insidef):
504 528 if path:
505 529 return self.vfs.join(self._filter(self.vfs.reljoin(path, *insidef)))
506 530 else:
507 531 return self.vfs.join(path)
508 532
509 533 filteropener = filtervfs
510 534
511 535 class readonlyvfs(abstractvfs, auditvfs):
512 536 '''Wrapper vfs preventing any writing.'''
513 537
514 538 def __init__(self, vfs):
515 539 auditvfs.__init__(self, vfs)
516 540
517 541 def __call__(self, path, mode='r', *args, **kw):
518 542 if mode not in ('r', 'rb'):
519 543 raise util.Abort('this vfs is read only')
520 544 return self.vfs(path, mode, *args, **kw)
521 545
522 546
523 547 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
524 548 '''yield every hg repository under path, always recursively.
525 549 The recurse flag will only control recursion into repo working dirs'''
526 550 def errhandler(err):
527 551 if err.filename == path:
528 552 raise err
529 553 samestat = getattr(os.path, 'samestat', None)
530 554 if followsym and samestat is not None:
531 555 def adddir(dirlst, dirname):
532 556 match = False
533 557 dirstat = os.stat(dirname)
534 558 for lstdirstat in dirlst:
535 559 if samestat(dirstat, lstdirstat):
536 560 match = True
537 561 break
538 562 if not match:
539 563 dirlst.append(dirstat)
540 564 return not match
541 565 else:
542 566 followsym = False
543 567
544 568 if (seen_dirs is None) and followsym:
545 569 seen_dirs = []
546 570 adddir(seen_dirs, path)
547 571 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
548 572 dirs.sort()
549 573 if '.hg' in dirs:
550 574 yield root # found a repository
551 575 qroot = os.path.join(root, '.hg', 'patches')
552 576 if os.path.isdir(os.path.join(qroot, '.hg')):
553 577 yield qroot # we have a patch queue repo here
554 578 if recurse:
555 579 # avoid recursing inside the .hg directory
556 580 dirs.remove('.hg')
557 581 else:
558 582 dirs[:] = [] # don't descend further
559 583 elif followsym:
560 584 newdirs = []
561 585 for d in dirs:
562 586 fname = os.path.join(root, d)
563 587 if adddir(seen_dirs, fname):
564 588 if os.path.islink(fname):
565 589 for hgname in walkrepos(fname, True, seen_dirs):
566 590 yield hgname
567 591 else:
568 592 newdirs.append(d)
569 593 dirs[:] = newdirs
570 594
571 595 def osrcpath():
572 596 '''return default os-specific hgrc search path'''
573 597 path = []
574 598 defaultpath = os.path.join(util.datapath, 'default.d')
575 599 if os.path.isdir(defaultpath):
576 600 for f, kind in osutil.listdir(defaultpath):
577 601 if f.endswith('.rc'):
578 602 path.append(os.path.join(defaultpath, f))
579 603 path.extend(systemrcpath())
580 604 path.extend(userrcpath())
581 605 path = [os.path.normpath(f) for f in path]
582 606 return path
583 607
584 608 _rcpath = None
585 609
586 610 def rcpath():
587 611 '''return hgrc search path. if env var HGRCPATH is set, use it.
588 612 for each item in path, if directory, use files ending in .rc,
589 613 else use item.
590 614 make HGRCPATH empty to only look in .hg/hgrc of current repo.
591 615 if no HGRCPATH, use default os-specific path.'''
592 616 global _rcpath
593 617 if _rcpath is None:
594 618 if 'HGRCPATH' in os.environ:
595 619 _rcpath = []
596 620 for p in os.environ['HGRCPATH'].split(os.pathsep):
597 621 if not p:
598 622 continue
599 623 p = util.expandpath(p)
600 624 if os.path.isdir(p):
601 625 for f, kind in osutil.listdir(p):
602 626 if f.endswith('.rc'):
603 627 _rcpath.append(os.path.join(p, f))
604 628 else:
605 629 _rcpath.append(p)
606 630 else:
607 631 _rcpath = osrcpath()
608 632 return _rcpath
609 633
610 634 def intrev(repo, rev):
611 635 """Return integer for a given revision that can be used in comparison or
612 636 arithmetic operations"""
613 637 if rev is None:
614 638 return len(repo)
615 639 return rev
616 640
617 641 def revsingle(repo, revspec, default='.'):
618 642 if not revspec and revspec != 0:
619 643 return repo[default]
620 644
621 645 l = revrange(repo, [revspec])
622 646 if not l:
623 647 raise util.Abort(_('empty revision set'))
624 648 return repo[l.last()]
625 649
626 650 def revpair(repo, revs):
627 651 if not revs:
628 652 return repo.dirstate.p1(), None
629 653
630 654 l = revrange(repo, revs)
631 655
632 656 if not l:
633 657 first = second = None
634 658 elif l.isascending():
635 659 first = l.min()
636 660 second = l.max()
637 661 elif l.isdescending():
638 662 first = l.max()
639 663 second = l.min()
640 664 else:
641 665 first = l.first()
642 666 second = l.last()
643 667
644 668 if first is None:
645 669 raise util.Abort(_('empty revision range'))
646 670
647 671 if first == second and len(revs) == 1 and _revrangesep not in revs[0]:
648 672 return repo.lookup(first), None
649 673
650 674 return repo.lookup(first), repo.lookup(second)
651 675
652 676 _revrangesep = ':'
653 677
654 678 def revrange(repo, revs):
655 679 """Yield revision as strings from a list of revision specifications."""
656 680
657 681 def revfix(repo, val, defval):
658 682 if not val and val != 0 and defval is not None:
659 683 return defval
660 684 return repo[val].rev()
661 685
662 686 seen, l = set(), revset.baseset([])
663 687
664 688 revsetaliases = [alias for (alias, _) in
665 689 repo.ui.configitems("revsetalias")]
666 690
667 691 for spec in revs:
668 692 if l and not seen:
669 693 seen = set(l)
670 694 # attempt to parse old-style ranges first to deal with
671 695 # things like old-tag which contain query metacharacters
672 696 try:
673 697 # ... except for revset aliases without arguments. These
674 698 # should be parsed as soon as possible, because they might
675 699 # clash with a hash prefix.
676 700 if spec in revsetaliases:
677 701 raise error.RepoLookupError
678 702
679 703 if isinstance(spec, int):
680 704 seen.add(spec)
681 705 l = l + revset.baseset([spec])
682 706 continue
683 707
684 708 if _revrangesep in spec:
685 709 start, end = spec.split(_revrangesep, 1)
686 710 if start in revsetaliases or end in revsetaliases:
687 711 raise error.RepoLookupError
688 712
689 713 start = revfix(repo, start, 0)
690 714 end = revfix(repo, end, len(repo) - 1)
691 715 if end == nullrev and start < 0:
692 716 start = nullrev
693 717 rangeiter = repo.changelog.revs(start, end)
694 718 if not seen and not l:
695 719 # by far the most common case: revs = ["-1:0"]
696 720 l = revset.baseset(rangeiter)
697 721 # defer syncing seen until next iteration
698 722 continue
699 723 newrevs = set(rangeiter)
700 724 if seen:
701 725 newrevs.difference_update(seen)
702 726 seen.update(newrevs)
703 727 else:
704 728 seen = newrevs
705 729 l = l + revset.baseset(sorted(newrevs, reverse=start > end))
706 730 continue
707 731 elif spec and spec in repo: # single unquoted rev
708 732 rev = revfix(repo, spec, None)
709 733 if rev in seen:
710 734 continue
711 735 seen.add(rev)
712 736 l = l + revset.baseset([rev])
713 737 continue
714 738 except error.RepoLookupError:
715 739 pass
716 740
717 741 # fall through to new-style queries if old-style fails
718 742 m = revset.match(repo.ui, spec, repo)
719 743 if seen or l:
720 744 dl = [r for r in m(repo) if r not in seen]
721 745 l = l + revset.baseset(dl)
722 746 seen.update(dl)
723 747 else:
724 748 l = m(repo)
725 749
726 750 return l
727 751
728 752 def expandpats(pats):
729 753 '''Expand bare globs when running on windows.
730 754 On posix we assume it has already been done by sh.'''
731 755 if not util.expandglobs:
732 756 return list(pats)
733 757 ret = []
734 758 for kindpat in pats:
735 759 kind, pat = matchmod._patsplit(kindpat, None)
736 760 if kind is None:
737 761 try:
738 762 globbed = glob.glob(pat)
739 763 except re.error:
740 764 globbed = [pat]
741 765 if globbed:
742 766 ret.extend(globbed)
743 767 continue
744 768 ret.append(kindpat)
745 769 return ret
746 770
747 771 def matchandpats(ctx, pats=[], opts={}, globbed=False, default='relpath'):
748 772 '''Return a matcher and the patterns that were used.
749 773 The matcher will warn about bad matches.'''
750 774 if pats == ("",):
751 775 pats = []
752 776 if not globbed and default == 'relpath':
753 777 pats = expandpats(pats or [])
754 778
755 779 m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
756 780 default)
757 781 def badfn(f, msg):
758 782 ctx.repo().ui.warn("%s: %s\n" % (m.rel(f), msg))
759 783 m.bad = badfn
760 784 if m.always():
761 785 pats = []
762 786 return m, pats
763 787
764 788 def match(ctx, pats=[], opts={}, globbed=False, default='relpath'):
765 789 '''Return a matcher that will warn about bad matches.'''
766 790 return matchandpats(ctx, pats, opts, globbed, default)[0]
767 791
768 792 def matchall(repo):
769 793 '''Return a matcher that will efficiently match everything.'''
770 794 return matchmod.always(repo.root, repo.getcwd())
771 795
772 796 def matchfiles(repo, files):
773 797 '''Return a matcher that will efficiently match exactly these files.'''
774 798 return matchmod.exact(repo.root, repo.getcwd(), files)
775 799
776 800 def addremove(repo, matcher, prefix, opts={}, dry_run=None, similarity=None):
777 801 m = matcher
778 802 if dry_run is None:
779 803 dry_run = opts.get('dry_run')
780 804 if similarity is None:
781 805 similarity = float(opts.get('similarity') or 0)
782 806
783 807 ret = 0
784 808 join = lambda f: os.path.join(prefix, f)
785 809
786 810 def matchessubrepo(matcher, subpath):
787 811 if matcher.exact(subpath):
788 812 return True
789 813 for f in matcher.files():
790 814 if f.startswith(subpath):
791 815 return True
792 816 return False
793 817
794 818 wctx = repo[None]
795 819 for subpath in sorted(wctx.substate):
796 820 if opts.get('subrepos') or matchessubrepo(m, subpath):
797 821 sub = wctx.sub(subpath)
798 822 try:
799 823 submatch = matchmod.narrowmatcher(subpath, m)
800 824 if sub.addremove(submatch, prefix, opts, dry_run, similarity):
801 825 ret = 1
802 826 except error.LookupError:
803 827 repo.ui.status(_("skipping missing subrepository: %s\n")
804 828 % join(subpath))
805 829
806 830 rejected = []
807 831 origbad = m.bad
808 832 def badfn(f, msg):
809 833 if f in m.files():
810 834 origbad(f, msg)
811 835 rejected.append(f)
812 836
813 837 m.bad = badfn
814 838 added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)
815 839 m.bad = origbad
816 840
817 841 unknownset = set(unknown + forgotten)
818 842 toprint = unknownset.copy()
819 843 toprint.update(deleted)
820 844 for abs in sorted(toprint):
821 845 if repo.ui.verbose or not m.exact(abs):
822 846 if abs in unknownset:
823 847 status = _('adding %s\n') % m.uipath(abs)
824 848 else:
825 849 status = _('removing %s\n') % m.uipath(abs)
826 850 repo.ui.status(status)
827 851
828 852 renames = _findrenames(repo, m, added + unknown, removed + deleted,
829 853 similarity)
830 854
831 855 if not dry_run:
832 856 _markchanges(repo, unknown + forgotten, deleted, renames)
833 857
834 858 for f in rejected:
835 859 if f in m.files():
836 860 return 1
837 861 return ret
838 862
839 863 def marktouched(repo, files, similarity=0.0):
840 864 '''Assert that files have somehow been operated upon. Files are relative to
841 865 the repo root.'''
842 866 m = matchfiles(repo, files)
843 867 rejected = []
844 868 m.bad = lambda x, y: rejected.append(x)
845 869
846 870 added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)
847 871
848 872 if repo.ui.verbose:
849 873 unknownset = set(unknown + forgotten)
850 874 toprint = unknownset.copy()
851 875 toprint.update(deleted)
852 876 for abs in sorted(toprint):
853 877 if abs in unknownset:
854 878 status = _('adding %s\n') % abs
855 879 else:
856 880 status = _('removing %s\n') % abs
857 881 repo.ui.status(status)
858 882
859 883 renames = _findrenames(repo, m, added + unknown, removed + deleted,
860 884 similarity)
861 885
862 886 _markchanges(repo, unknown + forgotten, deleted, renames)
863 887
864 888 for f in rejected:
865 889 if f in m.files():
866 890 return 1
867 891 return 0
868 892
869 893 def _interestingfiles(repo, matcher):
870 894 '''Walk dirstate with matcher, looking for files that addremove would care
871 895 about.
872 896
873 897 This is different from dirstate.status because it doesn't care about
874 898 whether files are modified or clean.'''
875 899 added, unknown, deleted, removed, forgotten = [], [], [], [], []
876 900 audit_path = pathutil.pathauditor(repo.root)
877 901
878 902 ctx = repo[None]
879 903 dirstate = repo.dirstate
880 904 walkresults = dirstate.walk(matcher, sorted(ctx.substate), True, False,
881 905 full=False)
882 906 for abs, st in walkresults.iteritems():
883 907 dstate = dirstate[abs]
884 908 if dstate == '?' and audit_path.check(abs):
885 909 unknown.append(abs)
886 910 elif dstate != 'r' and not st:
887 911 deleted.append(abs)
888 912 elif dstate == 'r' and st:
889 913 forgotten.append(abs)
890 914 # for finding renames
891 915 elif dstate == 'r' and not st:
892 916 removed.append(abs)
893 917 elif dstate == 'a':
894 918 added.append(abs)
895 919
896 920 return added, unknown, deleted, removed, forgotten
897 921
898 922 def _findrenames(repo, matcher, added, removed, similarity):
899 923 '''Find renames from removed files to added ones.'''
900 924 renames = {}
901 925 if similarity > 0:
902 926 for old, new, score in similar.findrenames(repo, added, removed,
903 927 similarity):
904 928 if (repo.ui.verbose or not matcher.exact(old)
905 929 or not matcher.exact(new)):
906 930 repo.ui.status(_('recording removal of %s as rename to %s '
907 931 '(%d%% similar)\n') %
908 932 (matcher.rel(old), matcher.rel(new),
909 933 score * 100))
910 934 renames[new] = old
911 935 return renames
912 936
913 937 def _markchanges(repo, unknown, deleted, renames):
914 938 '''Marks the files in unknown as added, the files in deleted as removed,
915 939 and the files in renames as copied.'''
916 940 wctx = repo[None]
917 941 wlock = repo.wlock()
918 942 try:
919 943 wctx.forget(deleted)
920 944 wctx.add(unknown)
921 945 for new, old in renames.iteritems():
922 946 wctx.copy(old, new)
923 947 finally:
924 948 wlock.release()
925 949
926 950 def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
927 951 """Update the dirstate to reflect the intent of copying src to dst. For
928 952 different reasons it might not end with dst being marked as copied from src.
929 953 """
930 954 origsrc = repo.dirstate.copied(src) or src
931 955 if dst == origsrc: # copying back a copy?
932 956 if repo.dirstate[dst] not in 'mn' and not dryrun:
933 957 repo.dirstate.normallookup(dst)
934 958 else:
935 959 if repo.dirstate[origsrc] == 'a' and origsrc == src:
936 960 if not ui.quiet:
937 961 ui.warn(_("%s has not been committed yet, so no copy "
938 962 "data will be stored for %s.\n")
939 963 % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
940 964 if repo.dirstate[dst] in '?r' and not dryrun:
941 965 wctx.add([dst])
942 966 elif not dryrun:
943 967 wctx.copy(origsrc, dst)
944 968
945 969 def readrequires(opener, supported):
946 970 '''Reads and parses .hg/requires and checks if all entries found
947 971 are in the list of supported features.'''
948 972 requirements = set(opener.read("requires").splitlines())
949 973 missings = []
950 974 for r in requirements:
951 975 if r not in supported:
952 976 if not r or not r[0].isalnum():
953 977 raise error.RequirementError(_(".hg/requires file is corrupt"))
954 978 missings.append(r)
955 979 missings.sort()
956 980 if missings:
957 981 raise error.RequirementError(
958 982 _("repository requires features unknown to this Mercurial: %s")
959 983 % " ".join(missings),
960 984 hint=_("see http://mercurial.selenic.com/wiki/MissingRequirement"
961 985 " for more information"))
962 986 return requirements
963 987
964 988 class filecachesubentry(object):
965 989 def __init__(self, path, stat):
966 990 self.path = path
967 991 self.cachestat = None
968 992 self._cacheable = None
969 993
970 994 if stat:
971 995 self.cachestat = filecachesubentry.stat(self.path)
972 996
973 997 if self.cachestat:
974 998 self._cacheable = self.cachestat.cacheable()
975 999 else:
976 1000 # None means we don't know yet
977 1001 self._cacheable = None
978 1002
979 1003 def refresh(self):
980 1004 if self.cacheable():
981 1005 self.cachestat = filecachesubentry.stat(self.path)
982 1006
983 1007 def cacheable(self):
984 1008 if self._cacheable is not None:
985 1009 return self._cacheable
986 1010
987 1011 # we don't know yet, assume it is for now
988 1012 return True
989 1013
990 1014 def changed(self):
991 1015 # no point in going further if we can't cache it
992 1016 if not self.cacheable():
993 1017 return True
994 1018
995 1019 newstat = filecachesubentry.stat(self.path)
996 1020
997 1021 # we may not know if it's cacheable yet, check again now
998 1022 if newstat and self._cacheable is None:
999 1023 self._cacheable = newstat.cacheable()
1000 1024
1001 1025 # check again
1002 1026 if not self._cacheable:
1003 1027 return True
1004 1028
1005 1029 if self.cachestat != newstat:
1006 1030 self.cachestat = newstat
1007 1031 return True
1008 1032 else:
1009 1033 return False
1010 1034
1011 1035 @staticmethod
1012 1036 def stat(path):
1013 1037 try:
1014 1038 return util.cachestat(path)
1015 1039 except OSError, e:
1016 1040 if e.errno != errno.ENOENT:
1017 1041 raise
1018 1042
1019 1043 class filecacheentry(object):
1020 1044 def __init__(self, paths, stat=True):
1021 1045 self._entries = []
1022 1046 for path in paths:
1023 1047 self._entries.append(filecachesubentry(path, stat))
1024 1048
1025 1049 def changed(self):
1026 1050 '''true if any entry has changed'''
1027 1051 for entry in self._entries:
1028 1052 if entry.changed():
1029 1053 return True
1030 1054 return False
1031 1055
1032 1056 def refresh(self):
1033 1057 for entry in self._entries:
1034 1058 entry.refresh()
1035 1059
1036 1060 class filecache(object):
1037 1061 '''A property-like decorator that tracks files under .hg/ for updates.
1038 1062
1039 1063 Records stat info when called in _filecache.
1040 1064
1041 1065 On subsequent calls, compares old stat info with new info, and recreates the
1042 1066 object when any of the files changes, updating the new stat info in
1043 1067 _filecache.
1044 1068
1045 1069 Mercurial either atomically renames or appends to files under .hg,
1046 1070 so to ensure the cache is reliable we need the filesystem to be able
1047 1071 to tell us if a file has been replaced. If it can't, we fall back to
1048 1072 recreating the object on every call (essentially the same behaviour as
1049 1073 propertycache).
1050 1074
1051 1075 '''
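A hypothetical consumer, modeled loosely on how localrepo uses this decorator (repofilecache and parsebookmarks here are illustrative stand-ins, not names from this file):

    def parsebookmarks(repo):             # stand-in for an expensive parse
        return repo.vfs.read('bookmarks')

    class repofilecache(filecache):
        '''filecache whose paths resolve relative to .hg/ (sketch)'''
        def join(self, obj, fname):
            return obj.vfs.join(fname)

    class localrepository(object):
        @repofilecache('bookmarks')
        def _bookmarks(self):
            # recomputed only when .hg/bookmarks' stat info changes
            return parsebookmarks(self)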
1052 1076 def __init__(self, *paths):
1053 1077 self.paths = paths
1054 1078
1055 1079 def join(self, obj, fname):
1056 1080 """Used to compute the runtime path of a cached file.
1057 1081
1058 1082 Users should subclass filecache and provide their own version of this
1059 1083 function to call the appropriate join function on 'obj' (an instance
1060 1084 of the class whose member function was decorated).
1061 1085 """
1062 1086 return obj.join(fname)
1063 1087
1064 1088 def __call__(self, func):
1065 1089 self.func = func
1066 1090 self.name = func.__name__
1067 1091 return self
1068 1092
1069 1093 def __get__(self, obj, type=None):
1070 1094 # do we need to check if the file changed?
1071 1095 if self.name in obj.__dict__:
1072 1096 assert self.name in obj._filecache, self.name
1073 1097 return obj.__dict__[self.name]
1074 1098
1075 1099 entry = obj._filecache.get(self.name)
1076 1100
1077 1101 if entry:
1078 1102 if entry.changed():
1079 1103 entry.obj = self.func(obj)
1080 1104 else:
1081 1105 paths = [self.join(obj, path) for path in self.paths]
1082 1106
1083 1107 # We stat -before- creating the object so our cache doesn't lie if
1084 1108 # a writer modified the file between the time we read and stat it
1085 1109 entry = filecacheentry(paths, True)
1086 1110 entry.obj = self.func(obj)
1087 1111
1088 1112 obj._filecache[self.name] = entry
1089 1113
1090 1114 obj.__dict__[self.name] = entry.obj
1091 1115 return entry.obj
1092 1116
1093 1117 def __set__(self, obj, value):
1094 1118 if self.name not in obj._filecache:
1095 1119 # we add an entry for the missing value because X in __dict__
1096 1120 # implies X in _filecache
1097 1121 paths = [self.join(obj, path) for path in self.paths]
1098 1122 ce = filecacheentry(paths, False)
1099 1123 obj._filecache[self.name] = ce
1100 1124 else:
1101 1125 ce = obj._filecache[self.name]
1102 1126
1103 1127 ce.obj = value # update cached copy
1104 1128 obj.__dict__[self.name] = value # update copy returned by obj.x
1105 1129
1106 1130 def __delete__(self, obj):
1107 1131 try:
1108 1132 del obj.__dict__[self.name]
1109 1133 except KeyError:
1110 1134 raise AttributeError(self.name)