repoview: move function for computing filtered hash...
Gregory Szorc
r24723:467a3314 default
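
This changeset moves the hash-of-filtered-revisions helper out of the private
branchcache._hashfiltered method (branchmap.py) and into a standalone
scmutil.filteredhash(repo, maxrev), so that caches other than the branch cache
can validate a repoview the same way. For orientation, here is a minimal
standalone sketch of the computation being moved; the function name is
illustrative, and hashlib.sha1 stands in for Mercurial's util.sha1 wrapper:

    import hashlib

    def filteredhash_sketch(filteredrevs, maxrev):
        # Hash the sorted filtered revision numbers at or below maxrev,
        # using the same '%s;'-per-rev byte format as the moved function.
        revs = sorted(r for r in filteredrevs if r <= maxrev)
        if not revs:
            return None
        s = hashlib.sha1()
        for rev in revs:
            s.update(('%s;' % rev).encode('ascii'))
        return s.digest()

    # Two repoviews hiding different revision sets produce different keys,
    # even when tipnode and tiprev are identical.
    assert filteredhash_sketch({2, 5}, 10) != filteredhash_sketch({2, 6}, 10)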
--- a/mercurial/branchmap.py
+++ b/mercurial/branchmap.py
@@ -1,460 +1,441 @@
 # branchmap.py - logic to computes, maintain and stores branchmap for local repo
 #
 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 from node import bin, hex, nullid, nullrev
 import encoding
+import scmutil
 import util
 import time
 from array import array
 from struct import calcsize, pack, unpack

 def _filename(repo):
     """name of a branchcache file for a given repo or repoview"""
     filename = "cache/branch2"
     if repo.filtername:
         filename = '%s-%s' % (filename, repo.filtername)
     return filename

 def read(repo):
     try:
         f = repo.vfs(_filename(repo))
         lines = f.read().split('\n')
         f.close()
     except (IOError, OSError):
         return None

     try:
         cachekey = lines.pop(0).split(" ", 2)
         last, lrev = cachekey[:2]
         last, lrev = bin(last), int(lrev)
         filteredhash = None
         if len(cachekey) > 2:
             filteredhash = bin(cachekey[2])
         partial = branchcache(tipnode=last, tiprev=lrev,
                               filteredhash=filteredhash)
         if not partial.validfor(repo):
             # invalidate the cache
             raise ValueError('tip differs')
         for l in lines:
             if not l:
                 continue
             node, state, label = l.split(" ", 2)
             if state not in 'oc':
                 raise ValueError('invalid branch state')
             label = encoding.tolocal(label.strip())
             if not node in repo:
                 raise ValueError('node %s does not exist' % node)
             node = bin(node)
             partial.setdefault(label, []).append(node)
             if state == 'c':
                 partial._closednodes.add(node)
     except KeyboardInterrupt:
         raise
     except Exception, inst:
         if repo.ui.debugflag:
             msg = 'invalid branchheads cache'
             if repo.filtername is not None:
                 msg += ' (%s)' % repo.filtername
             msg += ': %s\n'
             repo.ui.debug(msg % inst)
         partial = None
     return partial

 ### Nearest subset relation
 # Nearest subset of filter X is a filter Y so that:
 # * Y is included in X,
 # * X - Y is as small as possible.
 # This create and ordering used for branchmap purpose.
 # the ordering may be partial
 subsettable = {None: 'visible',
                'visible': 'served',
                'served': 'immutable',
                'immutable': 'base'}

 def updatecache(repo):
     cl = repo.changelog
     filtername = repo.filtername
     partial = repo._branchcaches.get(filtername)

     revs = []
     if partial is None or not partial.validfor(repo):
         partial = read(repo)
         if partial is None:
             subsetname = subsettable.get(filtername)
             if subsetname is None:
                 partial = branchcache()
             else:
                 subset = repo.filtered(subsetname)
                 partial = subset.branchmap().copy()
                 extrarevs = subset.changelog.filteredrevs - cl.filteredrevs
                 revs.extend(r for r in extrarevs if r <= partial.tiprev)
     revs.extend(cl.revs(start=partial.tiprev + 1))
     if revs:
         partial.update(repo, revs)
         partial.write(repo)

     assert partial.validfor(repo), filtername
     repo._branchcaches[repo.filtername] = partial

 class branchcache(dict):
     """A dict like object that hold branches heads cache.

     This cache is used to avoid costly computations to determine all the
     branch heads of a repo.

     The cache is serialized on disk in the following format:

     <tip hex node> <tip rev number> [optional filtered repo hex hash]
     <branch head hex node> <open/closed state> <branch name>
     <branch head hex node> <open/closed state> <branch name>
     ...

     The first line is used to check if the cache is still valid. If the
     branch cache is for a filtered repo view, an optional third hash is
     included that hashes the hashes of all filtered revisions.

     The open/closed state is represented by a single letter 'o' or 'c'.
     This field can be used to avoid changelog reads when determining if a
     branch head closes a branch or not.
     """

     def __init__(self, entries=(), tipnode=nullid, tiprev=nullrev,
                  filteredhash=None, closednodes=None):
         super(branchcache, self).__init__(entries)
         self.tipnode = tipnode
         self.tiprev = tiprev
         self.filteredhash = filteredhash
         # closednodes is a set of nodes that close their branch. If the branch
         # cache has been updated, it may contain nodes that are no longer
         # heads.
         if closednodes is None:
             self._closednodes = set()
         else:
             self._closednodes = closednodes

-    def _hashfiltered(self, repo):
-        """build hash of revision filtered in the current cache
-
-        Tracking tipnode and tiprev is not enough to ensure validity of the
-        cache as they do not help to distinct cache that ignored various
-        revision bellow tiprev.
-
-        To detect such difference, we build a cache of all ignored revisions.
-        """
-        cl = repo.changelog
-        if not cl.filteredrevs:
-            return None
-        key = None
-        revs = sorted(r for r in cl.filteredrevs if r <= self.tiprev)
-        if revs:
-            s = util.sha1()
-            for rev in revs:
-                s.update('%s;' % rev)
-            key = s.digest()
-        return key
-
     def validfor(self, repo):
         """Is the cache content valid regarding a repo

         - False when cached tipnode is unknown or if we detect a strip.
         - True when cache is up to date or a subset of current repo."""
         try:
             return ((self.tipnode == repo.changelog.node(self.tiprev))
-                    and (self.filteredhash == self._hashfiltered(repo)))
+                    and (self.filteredhash == \
+                         scmutil.filteredhash(repo, self.tiprev)))
         except IndexError:
             return False

     def _branchtip(self, heads):
         '''Return tuple with last open head in heads and false,
         otherwise return last closed head and true.'''
         tip = heads[-1]
         closed = True
         for h in reversed(heads):
             if h not in self._closednodes:
                 tip = h
                 closed = False
                 break
         return tip, closed

     def branchtip(self, branch):
         '''Return the tipmost open head on branch head, otherwise return the
         tipmost closed head on branch.
         Raise KeyError for unknown branch.'''
         return self._branchtip(self[branch])[0]

     def branchheads(self, branch, closed=False):
         heads = self[branch]
         if not closed:
             heads = [h for h in heads if h not in self._closednodes]
         return heads

     def iterbranches(self):
         for bn, heads in self.iteritems():
             yield (bn, heads) + self._branchtip(heads)

     def copy(self):
         """return an deep copy of the branchcache object"""
         return branchcache(self, self.tipnode, self.tiprev, self.filteredhash,
                            self._closednodes)

     def write(self, repo):
         try:
             f = repo.vfs(_filename(repo), "w", atomictemp=True)
             cachekey = [hex(self.tipnode), str(self.tiprev)]
             if self.filteredhash is not None:
                 cachekey.append(hex(self.filteredhash))
             f.write(" ".join(cachekey) + '\n')
             nodecount = 0
             for label, nodes in sorted(self.iteritems()):
                 for node in nodes:
                     nodecount += 1
                     if node in self._closednodes:
                         state = 'c'
                     else:
                         state = 'o'
                     f.write("%s %s %s\n" % (hex(node), state,
                                             encoding.fromlocal(label)))
             f.close()
             repo.ui.log('branchcache',
                         'wrote %s branch cache with %d labels and %d nodes\n',
                         repo.filtername, len(self), nodecount)
         except (IOError, OSError, util.Abort), inst:
             repo.ui.debug("couldn't write branch cache: %s\n" % inst)
             # Abort may be raise by read only opener
             pass

     def update(self, repo, revgen):
         """Given a branchhead cache, self, that may have extra nodes or be
         missing heads, and a generator of nodes that are strictly a superset of
         heads missing, this function updates self to be correct.
         """
         starttime = time.time()
         cl = repo.changelog
         # collect new branch entries
         newbranches = {}
         getbranchinfo = repo.revbranchcache().branchinfo
         for r in revgen:
             branch, closesbranch = getbranchinfo(r)
             newbranches.setdefault(branch, []).append(r)
             if closesbranch:
                 self._closednodes.add(cl.node(r))

         # fetch current topological heads to speed up filtering
         topoheads = set(cl.headrevs())

         # if older branchheads are reachable from new ones, they aren't
         # really branchheads. Note checking parents is insufficient:
         # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
         for branch, newheadrevs in newbranches.iteritems():
             bheads = self.setdefault(branch, [])
             bheadset = set(cl.rev(node) for node in bheads)

             # This have been tested True on all internal usage of this function.
             # run it again in case of doubt
             # assert not (set(bheadrevs) & set(newheadrevs))
             newheadrevs.sort()
             bheadset.update(newheadrevs)

             # This prunes out two kinds of heads - heads that are superseded by
             # a head in newheadrevs, and newheadrevs that are not heads because
             # an existing head is their descendant.
             uncertain = bheadset - topoheads
             if uncertain:
                 floorrev = min(uncertain)
                 ancestors = set(cl.ancestors(newheadrevs, floorrev))
                 bheadset -= ancestors
             bheadrevs = sorted(bheadset)
             self[branch] = [cl.node(rev) for rev in bheadrevs]
             tiprev = bheadrevs[-1]
             if tiprev > self.tiprev:
                 self.tipnode = cl.node(tiprev)
                 self.tiprev = tiprev

         if not self.validfor(repo):
             # cache key are not valid anymore
             self.tipnode = nullid
             self.tiprev = nullrev
             for heads in self.values():
                 tiprev = max(cl.rev(node) for node in heads)
                 if tiprev > self.tiprev:
                     self.tipnode = cl.node(tiprev)
                     self.tiprev = tiprev
-        self.filteredhash = self._hashfiltered(repo)
+        self.filteredhash = scmutil.filteredhash(repo, self.tiprev)

         duration = time.time() - starttime
         repo.ui.log('branchcache', 'updated %s branch cache in %.4f seconds\n',
                     repo.filtername, duration)

 # Revision branch info cache

 _rbcversion = '-v1'
 _rbcnames = 'cache/rbc-names' + _rbcversion
 _rbcrevs = 'cache/rbc-revs' + _rbcversion
 # [4 byte hash prefix][4 byte branch name number with sign bit indicating open]
 _rbcrecfmt = '>4sI'
 _rbcrecsize = calcsize(_rbcrecfmt)
 _rbcnodelen = 4
 _rbcbranchidxmask = 0x7fffffff
 _rbccloseflag = 0x80000000

 class revbranchcache(object):
     """Persistent cache, mapping from revision number to branch name and close.
     This is a low level cache, independent of filtering.

     Branch names are stored in rbc-names in internal encoding separated by 0.
     rbc-names is append-only, and each branch name is only stored once and will
     thus have a unique index.

     The branch info for each revision is stored in rbc-revs as constant size
     records. The whole file is read into memory, but it is only 'parsed' on
     demand. The file is usually append-only but will be truncated if repo
     modification is detected.
     The record for each revision contains the first 4 bytes of the
     corresponding node hash, and the record is only used if it still matches.
     Even a completely trashed rbc-revs fill thus still give the right result
     while converging towards full recovery ... assuming no incorrectly matching
     node hashes.
     The record also contains 4 bytes where 31 bits contains the index of the
     branch and the last bit indicate that it is a branch close commit.
     The usage pattern for rbc-revs is thus somewhat similar to 00changelog.i
     and will grow with it but be 1/8th of its size.
     """

     def __init__(self, repo, readonly=True):
         assert repo.filtername is None
         self._repo = repo
         self._names = [] # branch names in local encoding with static index
         self._rbcrevs = array('c') # structs of type _rbcrecfmt
         self._rbcsnameslen = 0
         try:
             bndata = repo.vfs.read(_rbcnames)
             self._rbcsnameslen = len(bndata) # for verification before writing
             self._names = [encoding.tolocal(bn) for bn in bndata.split('\0')]
         except (IOError, OSError), inst:
             repo.ui.debug("couldn't read revision branch cache names: %s\n" %
                           inst)
             if readonly:
                 # don't try to use cache - fall back to the slow path
                 self.branchinfo = self._branchinfo

         if self._names:
             try:
                 data = repo.vfs.read(_rbcrevs)
                 self._rbcrevs.fromstring(data)
             except (IOError, OSError), inst:
                 repo.ui.debug("couldn't read revision branch cache: %s\n" %
                               inst)
         # remember number of good records on disk
         self._rbcrevslen = min(len(self._rbcrevs) // _rbcrecsize,
                                len(repo.changelog))
         if self._rbcrevslen == 0:
             self._names = []
         self._rbcnamescount = len(self._names) # number of good names on disk
         self._namesreverse = dict((b, r) for r, b in enumerate(self._names))

     def branchinfo(self, rev):
         """Return branch name and close flag for rev, using and updating
         persistent cache."""
         changelog = self._repo.changelog
         rbcrevidx = rev * _rbcrecsize

         # if requested rev is missing, add and populate all missing revs
         if len(self._rbcrevs) < rbcrevidx + _rbcrecsize:
             self._rbcrevs.extend('\0' * (len(changelog) * _rbcrecsize -
                                          len(self._rbcrevs)))

         # fast path: extract data from cache, use it if node is matching
         reponode = changelog.node(rev)[:_rbcnodelen]
         cachenode, branchidx = unpack(
             _rbcrecfmt, buffer(self._rbcrevs, rbcrevidx, _rbcrecsize))
         close = bool(branchidx & _rbccloseflag)
         if close:
             branchidx &= _rbcbranchidxmask
         if cachenode == '\0\0\0\0':
             pass
         elif cachenode == reponode:
             return self._names[branchidx], close
         else:
             # rev/node map has changed, invalidate the cache from here up
             truncate = rbcrevidx + _rbcrecsize
             del self._rbcrevs[truncate:]
             self._rbcrevslen = min(self._rbcrevslen, truncate)

         # fall back to slow path and make sure it will be written to disk
         return self._branchinfo(rev)

     def _branchinfo(self, rev):
         """Retrieve branch info from changelog and update _rbcrevs"""
         changelog = self._repo.changelog
         b, close = changelog.branchinfo(rev)
         if b in self._namesreverse:
             branchidx = self._namesreverse[b]
         else:
             branchidx = len(self._names)
             self._names.append(b)
             self._namesreverse[b] = branchidx
         reponode = changelog.node(rev)
         if close:
             branchidx |= _rbccloseflag
         self._setcachedata(rev, reponode, branchidx)
         return b, close

     def _setcachedata(self, rev, node, branchidx):
         """Writes the node's branch data to the in-memory cache data."""
         rbcrevidx = rev * _rbcrecsize
         rec = array('c')
         rec.fromstring(pack(_rbcrecfmt, node, branchidx))
         self._rbcrevs[rbcrevidx:rbcrevidx + _rbcrecsize] = rec
         self._rbcrevslen = min(self._rbcrevslen, rev)

         tr = self._repo.currenttransaction()
         if tr:
             tr.addfinalize('write-revbranchcache', self.write)

     def write(self, tr=None):
         """Save branch cache if it is dirty."""
         repo = self._repo
         if self._rbcnamescount < len(self._names):
             try:
                 if self._rbcnamescount != 0:
                     f = repo.vfs.open(_rbcnames, 'ab')
                     if f.tell() == self._rbcsnameslen:
                         f.write('\0')
                     else:
                         f.close()
                         repo.ui.debug("%s changed - rewriting it\n" % _rbcnames)
                         self._rbcnamescount = 0
                         self._rbcrevslen = 0
                 if self._rbcnamescount == 0:
                     f = repo.vfs.open(_rbcnames, 'wb')
                 f.write('\0'.join(encoding.fromlocal(b)
                                   for b in self._names[self._rbcnamescount:]))
                 self._rbcsnameslen = f.tell()
                 f.close()
             except (IOError, OSError, util.Abort), inst:
                 repo.ui.debug("couldn't write revision branch cache names: "
                               "%s\n" % inst)
                 return
             self._rbcnamescount = len(self._names)

         start = self._rbcrevslen * _rbcrecsize
         if start != len(self._rbcrevs):
             revs = min(len(repo.changelog), len(self._rbcrevs) // _rbcrecsize)
             try:
                 f = repo.vfs.open(_rbcrevs, 'ab')
                 if f.tell() != start:
                     repo.ui.debug("truncating %s to %s\n" % (_rbcrevs, start))
                     f.seek(start)
                     f.truncate()
                 end = revs * _rbcrecsize
                 f.write(self._rbcrevs[start:end])
                 f.close()
             except (IOError, OSError, util.Abort), inst:
                 repo.ui.debug("couldn't write revision branch cache: %s\n" %
                               inst)
                 return
             self._rbcrevslen = revs
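
As the branchcache docstring above describes, the on-disk cache/branch2 file
is one cache-key line followed by one "<hex node> <o|c> <branch>" line per
head. A hypothetical round-trip sketch of that text format (parse_branch2 is
illustrative, not part of Mercurial, and binascii.unhexlify stands in for
node.bin):

    import binascii

    def parse_branch2(data):
        lines = data.split('\n')
        # First line: "<tip hex node> <tip rev> [optional filtered hash]".
        cachekey = lines.pop(0).split(' ', 2)
        tipnode, tiprev = binascii.unhexlify(cachekey[0]), int(cachekey[1])
        filteredhash = None
        if len(cachekey) > 2:
            filteredhash = binascii.unhexlify(cachekey[2])
        heads, closednodes = {}, set()
        for l in lines:
            if not l:
                continue
            node, state, label = l.split(' ', 2)
            if state not in 'oc':
                raise ValueError('invalid branch state')
            node = binascii.unhexlify(node)
            heads.setdefault(label.strip(), []).append(node)
            if state == 'c':  # 'c' marks a head whose commit closed the branch
                closednodes.add(node)
        return tipnode, tiprev, filteredhash, heads, closednodes

    sample = '00aa' * 10 + ' 7\n' + '11bb' * 10 + ' o default\n'
    tipnode, tiprev, fh, heads, closed = parse_branch2(sample)
    assert tiprev == 7 and fh is None and 'default' in heads and not closed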
@@ -1,1110 +1,1134 b''
1 # scmutil.py - Mercurial core utility functions
1 # scmutil.py - Mercurial core utility functions
2 #
2 #
3 # Copyright Matt Mackall <mpm@selenic.com>
3 # Copyright Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from i18n import _
8 from i18n import _
9 from mercurial.node import nullrev
9 from mercurial.node import nullrev
10 import util, error, osutil, revset, similar, encoding, phases
10 import util, error, osutil, revset, similar, encoding, phases
11 import pathutil
11 import pathutil
12 import match as matchmod
12 import match as matchmod
13 import os, errno, re, glob, tempfile, shutil, stat
13 import os, errno, re, glob, tempfile, shutil, stat
14
14
15 if os.name == 'nt':
15 if os.name == 'nt':
16 import scmwindows as scmplatform
16 import scmwindows as scmplatform
17 else:
17 else:
18 import scmposix as scmplatform
18 import scmposix as scmplatform
19
19
20 systemrcpath = scmplatform.systemrcpath
20 systemrcpath = scmplatform.systemrcpath
21 userrcpath = scmplatform.userrcpath
21 userrcpath = scmplatform.userrcpath
22
22
23 class status(tuple):
23 class status(tuple):
24 '''Named tuple with a list of files per status. The 'deleted', 'unknown'
24 '''Named tuple with a list of files per status. The 'deleted', 'unknown'
25 and 'ignored' properties are only relevant to the working copy.
25 and 'ignored' properties are only relevant to the working copy.
26 '''
26 '''
27
27
28 __slots__ = ()
28 __slots__ = ()
29
29
30 def __new__(cls, modified, added, removed, deleted, unknown, ignored,
30 def __new__(cls, modified, added, removed, deleted, unknown, ignored,
31 clean):
31 clean):
32 return tuple.__new__(cls, (modified, added, removed, deleted, unknown,
32 return tuple.__new__(cls, (modified, added, removed, deleted, unknown,
33 ignored, clean))
33 ignored, clean))
34
34
35 @property
35 @property
36 def modified(self):
36 def modified(self):
37 '''files that have been modified'''
37 '''files that have been modified'''
38 return self[0]
38 return self[0]
39
39
40 @property
40 @property
41 def added(self):
41 def added(self):
42 '''files that have been added'''
42 '''files that have been added'''
43 return self[1]
43 return self[1]
44
44
45 @property
45 @property
46 def removed(self):
46 def removed(self):
47 '''files that have been removed'''
47 '''files that have been removed'''
48 return self[2]
48 return self[2]
49
49
50 @property
50 @property
51 def deleted(self):
51 def deleted(self):
52 '''files that are in the dirstate, but have been deleted from the
52 '''files that are in the dirstate, but have been deleted from the
53 working copy (aka "missing")
53 working copy (aka "missing")
54 '''
54 '''
55 return self[3]
55 return self[3]
56
56
57 @property
57 @property
58 def unknown(self):
58 def unknown(self):
59 '''files not in the dirstate that are not ignored'''
59 '''files not in the dirstate that are not ignored'''
60 return self[4]
60 return self[4]
61
61
62 @property
62 @property
63 def ignored(self):
63 def ignored(self):
64 '''files not in the dirstate that are ignored (by _dirignore())'''
64 '''files not in the dirstate that are ignored (by _dirignore())'''
65 return self[5]
65 return self[5]
66
66
67 @property
67 @property
68 def clean(self):
68 def clean(self):
69 '''files that have not been modified'''
69 '''files that have not been modified'''
70 return self[6]
70 return self[6]
71
71
72 def __repr__(self, *args, **kwargs):
72 def __repr__(self, *args, **kwargs):
73 return (('<status modified=%r, added=%r, removed=%r, deleted=%r, '
73 return (('<status modified=%r, added=%r, removed=%r, deleted=%r, '
74 'unknown=%r, ignored=%r, clean=%r>') % self)
74 'unknown=%r, ignored=%r, clean=%r>') % self)
75
75
76 def itersubrepos(ctx1, ctx2):
76 def itersubrepos(ctx1, ctx2):
77 """find subrepos in ctx1 or ctx2"""
77 """find subrepos in ctx1 or ctx2"""
78 # Create a (subpath, ctx) mapping where we prefer subpaths from
78 # Create a (subpath, ctx) mapping where we prefer subpaths from
79 # ctx1. The subpaths from ctx2 are important when the .hgsub file
79 # ctx1. The subpaths from ctx2 are important when the .hgsub file
80 # has been modified (in ctx2) but not yet committed (in ctx1).
80 # has been modified (in ctx2) but not yet committed (in ctx1).
81 subpaths = dict.fromkeys(ctx2.substate, ctx2)
81 subpaths = dict.fromkeys(ctx2.substate, ctx2)
82 subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
82 subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
83 for subpath, ctx in sorted(subpaths.iteritems()):
83 for subpath, ctx in sorted(subpaths.iteritems()):
84 yield subpath, ctx.sub(subpath)
84 yield subpath, ctx.sub(subpath)
85
85
86 def nochangesfound(ui, repo, excluded=None):
86 def nochangesfound(ui, repo, excluded=None):
87 '''Report no changes for push/pull, excluded is None or a list of
87 '''Report no changes for push/pull, excluded is None or a list of
88 nodes excluded from the push/pull.
88 nodes excluded from the push/pull.
89 '''
89 '''
90 secretlist = []
90 secretlist = []
91 if excluded:
91 if excluded:
92 for n in excluded:
92 for n in excluded:
93 if n not in repo:
93 if n not in repo:
94 # discovery should not have included the filtered revision,
94 # discovery should not have included the filtered revision,
95 # we have to explicitly exclude it until discovery is cleanup.
95 # we have to explicitly exclude it until discovery is cleanup.
96 continue
96 continue
97 ctx = repo[n]
97 ctx = repo[n]
98 if ctx.phase() >= phases.secret and not ctx.extinct():
98 if ctx.phase() >= phases.secret and not ctx.extinct():
99 secretlist.append(n)
99 secretlist.append(n)
100
100
101 if secretlist:
101 if secretlist:
102 ui.status(_("no changes found (ignored %d secret changesets)\n")
102 ui.status(_("no changes found (ignored %d secret changesets)\n")
103 % len(secretlist))
103 % len(secretlist))
104 else:
104 else:
105 ui.status(_("no changes found\n"))
105 ui.status(_("no changes found\n"))
106
106
107 def checknewlabel(repo, lbl, kind):
107 def checknewlabel(repo, lbl, kind):
108 # Do not use the "kind" parameter in ui output.
108 # Do not use the "kind" parameter in ui output.
109 # It makes strings difficult to translate.
109 # It makes strings difficult to translate.
110 if lbl in ['tip', '.', 'null']:
110 if lbl in ['tip', '.', 'null']:
111 raise util.Abort(_("the name '%s' is reserved") % lbl)
111 raise util.Abort(_("the name '%s' is reserved") % lbl)
112 for c in (':', '\0', '\n', '\r'):
112 for c in (':', '\0', '\n', '\r'):
113 if c in lbl:
113 if c in lbl:
114 raise util.Abort(_("%r cannot be used in a name") % c)
114 raise util.Abort(_("%r cannot be used in a name") % c)
115 try:
115 try:
116 int(lbl)
116 int(lbl)
117 raise util.Abort(_("cannot use an integer as a name"))
117 raise util.Abort(_("cannot use an integer as a name"))
118 except ValueError:
118 except ValueError:
119 pass
119 pass
120
120
121 def checkfilename(f):
121 def checkfilename(f):
122 '''Check that the filename f is an acceptable filename for a tracked file'''
122 '''Check that the filename f is an acceptable filename for a tracked file'''
123 if '\r' in f or '\n' in f:
123 if '\r' in f or '\n' in f:
124 raise util.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)
124 raise util.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)
125
125
126 def checkportable(ui, f):
126 def checkportable(ui, f):
127 '''Check if filename f is portable and warn or abort depending on config'''
127 '''Check if filename f is portable and warn or abort depending on config'''
128 checkfilename(f)
128 checkfilename(f)
129 abort, warn = checkportabilityalert(ui)
129 abort, warn = checkportabilityalert(ui)
130 if abort or warn:
130 if abort or warn:
131 msg = util.checkwinfilename(f)
131 msg = util.checkwinfilename(f)
132 if msg:
132 if msg:
133 msg = "%s: %r" % (msg, f)
133 msg = "%s: %r" % (msg, f)
134 if abort:
134 if abort:
135 raise util.Abort(msg)
135 raise util.Abort(msg)
136 ui.warn(_("warning: %s\n") % msg)
136 ui.warn(_("warning: %s\n") % msg)
137
137
138 def checkportabilityalert(ui):
138 def checkportabilityalert(ui):
139 '''check if the user's config requests nothing, a warning, or abort for
139 '''check if the user's config requests nothing, a warning, or abort for
140 non-portable filenames'''
140 non-portable filenames'''
141 val = ui.config('ui', 'portablefilenames', 'warn')
141 val = ui.config('ui', 'portablefilenames', 'warn')
142 lval = val.lower()
142 lval = val.lower()
143 bval = util.parsebool(val)
143 bval = util.parsebool(val)
144 abort = os.name == 'nt' or lval == 'abort'
144 abort = os.name == 'nt' or lval == 'abort'
145 warn = bval or lval == 'warn'
145 warn = bval or lval == 'warn'
146 if bval is None and not (warn or abort or lval == 'ignore'):
146 if bval is None and not (warn or abort or lval == 'ignore'):
147 raise error.ConfigError(
147 raise error.ConfigError(
148 _("ui.portablefilenames value is invalid ('%s')") % val)
148 _("ui.portablefilenames value is invalid ('%s')") % val)
149 return abort, warn
149 return abort, warn
150
150
151 class casecollisionauditor(object):
151 class casecollisionauditor(object):
152 def __init__(self, ui, abort, dirstate):
152 def __init__(self, ui, abort, dirstate):
153 self._ui = ui
153 self._ui = ui
154 self._abort = abort
154 self._abort = abort
155 allfiles = '\0'.join(dirstate._map)
155 allfiles = '\0'.join(dirstate._map)
156 self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
156 self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
157 self._dirstate = dirstate
157 self._dirstate = dirstate
158 # The purpose of _newfiles is so that we don't complain about
158 # The purpose of _newfiles is so that we don't complain about
159 # case collisions if someone were to call this object with the
159 # case collisions if someone were to call this object with the
160 # same filename twice.
160 # same filename twice.
161 self._newfiles = set()
161 self._newfiles = set()
162
162
163 def __call__(self, f):
163 def __call__(self, f):
164 if f in self._newfiles:
164 if f in self._newfiles:
165 return
165 return
166 fl = encoding.lower(f)
166 fl = encoding.lower(f)
167 if fl in self._loweredfiles and f not in self._dirstate:
167 if fl in self._loweredfiles and f not in self._dirstate:
168 msg = _('possible case-folding collision for %s') % f
168 msg = _('possible case-folding collision for %s') % f
169 if self._abort:
169 if self._abort:
170 raise util.Abort(msg)
170 raise util.Abort(msg)
171 self._ui.warn(_("warning: %s\n") % msg)
171 self._ui.warn(_("warning: %s\n") % msg)
172 self._loweredfiles.add(fl)
172 self._loweredfiles.add(fl)
173 self._newfiles.add(f)
173 self._newfiles.add(f)
174
174
175 def filteredhash(repo, maxrev):
176 """build hash of filtered revisions in the current repoview.
177
178 Multiple caches perform up-to-date validation by checking that the
179 tiprev and tipnode stored in the cache file match the current repository.
180 However, this is not sufficient for validating repoviews because the set
181 of revisions in the view may change without the repository tiprev and
182 tipnode changing.
183
184 This function hashes all the revs filtered from the view and returns
185 that SHA-1 digest.
186 """
187 cl = repo.changelog
188 if not cl.filteredrevs:
189 return None
190 key = None
191 revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
192 if revs:
193 s = util.sha1()
194 for rev in revs:
195 s.update('%s;' % rev)
196 key = s.digest()
197 return key
198
175 class abstractvfs(object):
199 class abstractvfs(object):
176 """Abstract base class; cannot be instantiated"""
200 """Abstract base class; cannot be instantiated"""
177
201
178 def __init__(self, *args, **kwargs):
202 def __init__(self, *args, **kwargs):
179 '''Prevent instantiation; don't call this from subclasses.'''
203 '''Prevent instantiation; don't call this from subclasses.'''
180 raise NotImplementedError('attempted instantiating ' + str(type(self)))
204 raise NotImplementedError('attempted instantiating ' + str(type(self)))
181
205
182 def tryread(self, path):
206 def tryread(self, path):
183 '''gracefully return an empty string for missing files'''
207 '''gracefully return an empty string for missing files'''
184 try:
208 try:
185 return self.read(path)
209 return self.read(path)
186 except IOError, inst:
210 except IOError, inst:
187 if inst.errno != errno.ENOENT:
211 if inst.errno != errno.ENOENT:
188 raise
212 raise
189 return ""
213 return ""
190
214
191 def tryreadlines(self, path, mode='rb'):
215 def tryreadlines(self, path, mode='rb'):
192 '''gracefully return an empty array for missing files'''
216 '''gracefully return an empty array for missing files'''
193 try:
217 try:
194 return self.readlines(path, mode=mode)
218 return self.readlines(path, mode=mode)
195 except IOError, inst:
219 except IOError, inst:
196 if inst.errno != errno.ENOENT:
220 if inst.errno != errno.ENOENT:
197 raise
221 raise
198 return []
222 return []
199
223
200 def open(self, path, mode="r", text=False, atomictemp=False,
224 def open(self, path, mode="r", text=False, atomictemp=False,
201 notindexed=False):
225 notindexed=False):
202 '''Open ``path`` file, which is relative to vfs root.
226 '''Open ``path`` file, which is relative to vfs root.
203
227
204 Newly created directories are marked as "not to be indexed by
228 Newly created directories are marked as "not to be indexed by
205 the content indexing service", if ``notindexed`` is specified
229 the content indexing service", if ``notindexed`` is specified
206 for "write" mode access.
230 for "write" mode access.
207 '''
231 '''
208 self.open = self.__call__
232 self.open = self.__call__
209 return self.__call__(path, mode, text, atomictemp, notindexed)
233 return self.__call__(path, mode, text, atomictemp, notindexed)
210
234
211 def read(self, path):
235 def read(self, path):
212 fp = self(path, 'rb')
236 fp = self(path, 'rb')
213 try:
237 try:
214 return fp.read()
238 return fp.read()
215 finally:
239 finally:
216 fp.close()
240 fp.close()
217
241
218 def readlines(self, path, mode='rb'):
242 def readlines(self, path, mode='rb'):
219 fp = self(path, mode=mode)
243 fp = self(path, mode=mode)
220 try:
244 try:
221 return fp.readlines()
245 return fp.readlines()
222 finally:
246 finally:
223 fp.close()
247 fp.close()
224
248
225 def write(self, path, data):
249 def write(self, path, data):
226 fp = self(path, 'wb')
250 fp = self(path, 'wb')
227 try:
251 try:
228 return fp.write(data)
252 return fp.write(data)
229 finally:
253 finally:
230 fp.close()
254 fp.close()
231
255
232 def writelines(self, path, data, mode='wb', notindexed=False):
256 def writelines(self, path, data, mode='wb', notindexed=False):
233 fp = self(path, mode=mode, notindexed=notindexed)
257 fp = self(path, mode=mode, notindexed=notindexed)
234 try:
258 try:
235 return fp.writelines(data)
259 return fp.writelines(data)
236 finally:
260 finally:
237 fp.close()
261 fp.close()
238
262
239 def append(self, path, data):
263 def append(self, path, data):
240 fp = self(path, 'ab')
264 fp = self(path, 'ab')
241 try:
265 try:
242 return fp.write(data)
266 return fp.write(data)
243 finally:
267 finally:
244 fp.close()
268 fp.close()
245
269
246 def chmod(self, path, mode):
270 def chmod(self, path, mode):
247 return os.chmod(self.join(path), mode)
271 return os.chmod(self.join(path), mode)
248
272
249 def exists(self, path=None):
273 def exists(self, path=None):
250 return os.path.exists(self.join(path))
274 return os.path.exists(self.join(path))
251
275
252 def fstat(self, fp):
276 def fstat(self, fp):
253 return util.fstat(fp)
277 return util.fstat(fp)
254
278
255 def isdir(self, path=None):
279 def isdir(self, path=None):
256 return os.path.isdir(self.join(path))
280 return os.path.isdir(self.join(path))
257
281
258 def isfile(self, path=None):
282 def isfile(self, path=None):
259 return os.path.isfile(self.join(path))
283 return os.path.isfile(self.join(path))
260
284
261 def islink(self, path=None):
285 def islink(self, path=None):
262 return os.path.islink(self.join(path))
286 return os.path.islink(self.join(path))
263
287
264 def reljoin(self, *paths):
288 def reljoin(self, *paths):
265 """join various elements of a path together (as os.path.join would do)
289 """join various elements of a path together (as os.path.join would do)
266
290
267 The vfs base is not injected so that path stay relative. This exists
291 The vfs base is not injected so that path stay relative. This exists
268 to allow handling of strange encoding if needed."""
292 to allow handling of strange encoding if needed."""
269 return os.path.join(*paths)
293 return os.path.join(*paths)
270
294
271 def split(self, path):
295 def split(self, path):
272 """split top-most element of a path (as os.path.split would do)
296 """split top-most element of a path (as os.path.split would do)
273
297
274 This exists to allow handling of strange encoding if needed."""
298 This exists to allow handling of strange encoding if needed."""
275 return os.path.split(path)
299 return os.path.split(path)
276
300
277 def lexists(self, path=None):
301 def lexists(self, path=None):
278 return os.path.lexists(self.join(path))
302 return os.path.lexists(self.join(path))
279
303
280 def lstat(self, path=None):
304 def lstat(self, path=None):
281 return os.lstat(self.join(path))
305 return os.lstat(self.join(path))
282
306
283 def listdir(self, path=None):
307 def listdir(self, path=None):
284 return os.listdir(self.join(path))
308 return os.listdir(self.join(path))
285
309
286 def makedir(self, path=None, notindexed=True):
310 def makedir(self, path=None, notindexed=True):
287 return util.makedir(self.join(path), notindexed)
311 return util.makedir(self.join(path), notindexed)
288
312
289 def makedirs(self, path=None, mode=None):
313 def makedirs(self, path=None, mode=None):
290 return util.makedirs(self.join(path), mode)
314 return util.makedirs(self.join(path), mode)
291
315
292 def makelock(self, info, path):
316 def makelock(self, info, path):
293 return util.makelock(info, self.join(path))
317 return util.makelock(info, self.join(path))
294
318
295 def mkdir(self, path=None):
319 def mkdir(self, path=None):
296 return os.mkdir(self.join(path))
320 return os.mkdir(self.join(path))
297
321
298 def mkstemp(self, suffix='', prefix='tmp', dir=None, text=False):
322 def mkstemp(self, suffix='', prefix='tmp', dir=None, text=False):
299 fd, name = tempfile.mkstemp(suffix=suffix, prefix=prefix,
323 fd, name = tempfile.mkstemp(suffix=suffix, prefix=prefix,
300 dir=self.join(dir), text=text)
324 dir=self.join(dir), text=text)
301 dname, fname = util.split(name)
325 dname, fname = util.split(name)
302 if dir:
326 if dir:
303 return fd, os.path.join(dir, fname)
327 return fd, os.path.join(dir, fname)
304 else:
328 else:
305 return fd, fname
329 return fd, fname
306
330
307 def readdir(self, path=None, stat=None, skip=None):
331 def readdir(self, path=None, stat=None, skip=None):
308 return osutil.listdir(self.join(path), stat, skip)
332 return osutil.listdir(self.join(path), stat, skip)
309
333
310 def readlock(self, path):
334 def readlock(self, path):
311 return util.readlock(self.join(path))
335 return util.readlock(self.join(path))
312
336
313 def rename(self, src, dst):
337 def rename(self, src, dst):
314 return util.rename(self.join(src), self.join(dst))
338 return util.rename(self.join(src), self.join(dst))
315
339
316 def readlink(self, path):
340 def readlink(self, path):
317 return os.readlink(self.join(path))
341 return os.readlink(self.join(path))
318
342
319 def removedirs(self, path=None):
343 def removedirs(self, path=None):
320 """Remove a leaf directory and all empty intermediate ones
344 """Remove a leaf directory and all empty intermediate ones
321 """
345 """
322 return util.removedirs(self.join(path))
346 return util.removedirs(self.join(path))
323
347
324 def rmtree(self, path=None, ignore_errors=False, forcibly=False):
348 def rmtree(self, path=None, ignore_errors=False, forcibly=False):
325 """Remove a directory tree recursively
349 """Remove a directory tree recursively
326
350
327 If ``forcibly``, this tries to remove READ-ONLY files, too.
351 If ``forcibly``, this tries to remove READ-ONLY files, too.
328 """
352 """
329 if forcibly:
353 if forcibly:
330 def onerror(function, path, excinfo):
354 def onerror(function, path, excinfo):
331 if function is not os.remove:
355 if function is not os.remove:
332 raise
356 raise
333 # read-only files cannot be unlinked under Windows
357 # read-only files cannot be unlinked under Windows
334 s = os.stat(path)
358 s = os.stat(path)
335 if (s.st_mode & stat.S_IWRITE) != 0:
359 if (s.st_mode & stat.S_IWRITE) != 0:
336 raise
360 raise
337 os.chmod(path, stat.S_IMODE(s.st_mode) | stat.S_IWRITE)
361 os.chmod(path, stat.S_IMODE(s.st_mode) | stat.S_IWRITE)
338 os.remove(path)
362 os.remove(path)
339 else:
363 else:
340 onerror = None
364 onerror = None
341 return shutil.rmtree(self.join(path),
365 return shutil.rmtree(self.join(path),
342 ignore_errors=ignore_errors, onerror=onerror)
366 ignore_errors=ignore_errors, onerror=onerror)
343
367
344 def setflags(self, path, l, x):
368 def setflags(self, path, l, x):
345 return util.setflags(self.join(path), l, x)
369 return util.setflags(self.join(path), l, x)
346
370
347 def stat(self, path=None):
371 def stat(self, path=None):
348 return os.stat(self.join(path))
372 return os.stat(self.join(path))
349
373
350 def unlink(self, path=None):
374 def unlink(self, path=None):
351 return util.unlink(self.join(path))
375 return util.unlink(self.join(path))
352
376
353 def unlinkpath(self, path=None, ignoremissing=False):
377 def unlinkpath(self, path=None, ignoremissing=False):
354 return util.unlinkpath(self.join(path), ignoremissing)
378 return util.unlinkpath(self.join(path), ignoremissing)
355
379
356 def utime(self, path=None, t=None):
380 def utime(self, path=None, t=None):
357 return os.utime(self.join(path), t)
381 return os.utime(self.join(path), t)
358
382
class vfs(abstractvfs):
    '''Operate files relative to a base directory

    This class is used to hide the details of COW semantics and
    remote file access from higher level code.
    '''
    def __init__(self, base, audit=True, expandpath=False, realpath=False):
        if expandpath:
            base = util.expandpath(base)
        if realpath:
            base = os.path.realpath(base)
        self.base = base
        self._setmustaudit(audit)
        self.createmode = None
        self._trustnlink = None

    def _getmustaudit(self):
        return self._audit

    def _setmustaudit(self, onoff):
        self._audit = onoff
        if onoff:
            self.audit = pathutil.pathauditor(self.base)
        else:
            self.audit = util.always

    mustaudit = property(_getmustaudit, _setmustaudit)

    @util.propertycache
    def _cansymlink(self):
        return util.checklink(self.base)

    @util.propertycache
    def _chmod(self):
        return util.checkexec(self.base)

    def _fixfilemode(self, name):
        if self.createmode is None or not self._chmod:
            return
        os.chmod(name, self.createmode & 0666)

    def __call__(self, path, mode="r", text=False, atomictemp=False,
                 notindexed=False):
        '''Open ``path`` file, which is relative to vfs root.

        Newly created directories are marked as "not to be indexed by
        the content indexing service", if ``notindexed`` is specified
        for "write" mode access.
        '''
        if self._audit:
            r = util.checkosfilename(path)
            if r:
                raise util.Abort("%s: %r" % (r, path))
        self.audit(path)
        f = self.join(path)

        if not text and "b" not in mode:
            mode += "b" # for that other OS

        nlink = -1
        if mode not in ('r', 'rb'):
            dirname, basename = util.split(f)
            # If basename is empty, then the path is malformed because it points
            # to a directory. Let the posixfile() call below raise IOError.
            if basename:
                if atomictemp:
                    util.ensuredirs(dirname, self.createmode, notindexed)
                    return util.atomictempfile(f, mode, self.createmode)
                try:
                    if 'w' in mode:
                        util.unlink(f)
                        nlink = 0
                    else:
                        # nlinks() may behave differently for files on Windows
                        # shares if the file is open.
                        fd = util.posixfile(f)
                        nlink = util.nlinks(f)
                        if nlink < 1:
                            nlink = 2 # force mktempcopy (issue1922)
                        fd.close()
                except (OSError, IOError), e:
                    if e.errno != errno.ENOENT:
                        raise
                    nlink = 0
                    util.ensuredirs(dirname, self.createmode, notindexed)
                if nlink > 0:
                    if self._trustnlink is None:
                        self._trustnlink = nlink > 1 or util.checknlink(f)
                    if nlink > 1 or not self._trustnlink:
                        util.rename(util.mktempcopy(f), f)
        fp = util.posixfile(f, mode)
        if nlink == 0:
            self._fixfilemode(f)
        return fp

    def symlink(self, src, dst):
        self.audit(dst)
        linkname = self.join(dst)
        try:
            os.unlink(linkname)
        except OSError:
            pass

        util.ensuredirs(os.path.dirname(linkname), self.createmode)

        if self._cansymlink:
            try:
                os.symlink(src, linkname)
            except OSError, err:
                raise OSError(err.errno, _('could not symlink to %r: %s') %
                              (src, err.strerror), linkname)
        else:
            self.write(dst, src)

    def join(self, path, *insidef):
        if path:
            return os.path.join(self.base, path, *insidef)
        else:
            return self.base

opener = vfs

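# Illustrative sketch (not part of the original file): typical use of the
# vfs class documented above.  The base directory, file name and payload
# are hypothetical.
def _demovfs(base):
    v = vfs(base)                   # paths are audited and kept under base
    fp = v('notes/todo.txt', 'w')   # missing parent directories are created
    fp.write('remember the milk\n')
    fp.close()
    return v('notes/todo.txt').read()   # "r" is opened as "rb" internally
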
class auditvfs(object):
    def __init__(self, vfs):
        self.vfs = vfs

    def _getmustaudit(self):
        return self.vfs.mustaudit

    def _setmustaudit(self, onoff):
        self.vfs.mustaudit = onoff

    mustaudit = property(_getmustaudit, _setmustaudit)

class filtervfs(abstractvfs, auditvfs):
    '''Wrapper vfs for filtering filenames with a function.'''

    def __init__(self, vfs, filter):
        auditvfs.__init__(self, vfs)
        self._filter = filter

    def __call__(self, path, *args, **kwargs):
        return self.vfs(self._filter(path), *args, **kwargs)

    def join(self, path, *insidef):
        if path:
            return self.vfs.join(self._filter(self.vfs.reljoin(path, *insidef)))
        else:
            return self.vfs.join(path)

filteropener = filtervfs

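# Illustrative sketch (hypothetical filter): filtervfs rewrites each path
# before delegating to the wrapped vfs, which is how store filename
# encoding gets plugged in.  The lowercasing filter below is made up.
def _demofiltervfs(base):
    lower = filtervfs(vfs(base), lambda p: p.lower())
    return lower.join('FOO/Bar')    # resolves to <base>/foo/bar
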
class readonlyvfs(abstractvfs, auditvfs):
    '''Wrapper vfs preventing any writing.'''

    def __init__(self, vfs):
        auditvfs.__init__(self, vfs)

    def __call__(self, path, mode='r', *args, **kw):
        if mode not in ('r', 'rb'):
            raise util.Abort('this vfs is read only')
        return self.vfs(path, mode, *args, **kw)

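# Illustrative sketch: wrapping a vfs read-only is a cheap safety net for
# code paths that must never write.  The file name is hypothetical.
def _demoreadonlyvfs(base):
    ro = readonlyvfs(vfs(base))
    data = ro('hgrc').read()        # reads are delegated unchanged
    # ro('hgrc', 'w') would raise util.Abort('this vfs is read only')
    return data
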
def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    '''Yield every hg repository under path, always recursively.
    The recurse flag only controls recursion into repo working dirs.'''
    def errhandler(err):
        if err.filename == path:
            raise err
    samestat = getattr(os.path, 'samestat', None)
    if followsym and samestat is not None:
        def adddir(dirlst, dirname):
            match = False
            dirstat = os.stat(dirname)
            for lstdirstat in dirlst:
                if samestat(dirstat, lstdirstat):
                    match = True
                    break
            if not match:
                dirlst.append(dirstat)
            return not match
    else:
        followsym = False

    if (seen_dirs is None) and followsym:
        seen_dirs = []
        adddir(seen_dirs, path)
    for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
        dirs.sort()
        if '.hg' in dirs:
            yield root # found a repository
            qroot = os.path.join(root, '.hg', 'patches')
            if os.path.isdir(os.path.join(qroot, '.hg')):
                yield qroot # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove('.hg')
            else:
                dirs[:] = [] # don't descend further
        elif followsym:
            newdirs = []
            for d in dirs:
                fname = os.path.join(root, d)
                if adddir(seen_dirs, fname):
                    if os.path.islink(fname):
                        for hgname in walkrepos(fname, True, seen_dirs):
                            yield hgname
                    else:
                        newdirs.append(d)
            dirs[:] = newdirs

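# Illustrative sketch: collecting the repositories yielded above, e.g. for
# hgwebdir-style publishing.  The root directory is hypothetical.
def _demowalkrepos(root):
    # includes patch queue repos found under <repo>/.hg/patches
    return sorted(walkrepos(root, followsym=True))
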
def osrcpath():
    '''return default os-specific hgrc search path'''
    path = []
    defaultpath = os.path.join(util.datapath, 'default.d')
    if os.path.isdir(defaultpath):
        for f, kind in osutil.listdir(defaultpath):
            if f.endswith('.rc'):
                path.append(os.path.join(defaultpath, f))
    path.extend(systemrcpath())
    path.extend(userrcpath())
    path = [os.path.normpath(f) for f in path]
    return path

_rcpath = None

def rcpath():
    '''return hgrc search path. if env var HGRCPATH is set, use it.
    for each item in path, if directory, use files ending in .rc,
    else use item.
    make HGRCPATH empty to only look in .hg/hgrc of current repo.
    if no HGRCPATH, use default os-specific path.'''
    global _rcpath
    if _rcpath is None:
        if 'HGRCPATH' in os.environ:
            _rcpath = []
            for p in os.environ['HGRCPATH'].split(os.pathsep):
                if not p:
                    continue
                p = util.expandpath(p)
                if os.path.isdir(p):
                    for f, kind in osutil.listdir(p):
                        if f.endswith('.rc'):
                            _rcpath.append(os.path.join(p, f))
                else:
                    _rcpath.append(p)
        else:
            _rcpath = osrcpath()
    return _rcpath

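# Illustrative note on the HGRCPATH contract described above; the values
# are hypothetical:
#
#   HGRCPATH=/etc/hg.rc:/etc/hg.d   -> /etc/hg.rc plus /etc/hg.d/*.rc
#   HGRCPATH= (set but empty)       -> no global files, only .hg/hgrc
#   HGRCPATH unset                  -> osrcpath() defaults
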
def intrev(repo, rev):
    """Return integer for a given revision that can be used in comparison or
    arithmetic operation"""
    if rev is None:
        return len(repo)
    return rev

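# Illustrative note: the working directory (rev None) maps to len(repo),
# one past the largest real revision, so ordering by intrev() keeps the
# working directory last:
#
#   sorted([5, None, 2], key=lambda r: intrev(repo, r))  ->  [2, 5, None]
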
def revsingle(repo, revspec, default='.'):
    if not revspec and revspec != 0:
        return repo[default]

    l = revrange(repo, [revspec])
    if not l:
        raise util.Abort(_('empty revision set'))
    return repo[l.last()]

def revpair(repo, revs):
    if not revs:
        return repo.dirstate.p1(), None

    l = revrange(repo, revs)

    if not l:
        first = second = None
    elif l.isascending():
        first = l.min()
        second = l.max()
    elif l.isdescending():
        first = l.max()
        second = l.min()
    else:
        first = l.first()
        second = l.last()

    if first is None:
        raise util.Abort(_('empty revision range'))

    if first == second and len(revs) == 1 and _revrangesep not in revs[0]:
        return repo.lookup(first), None

    return repo.lookup(first), repo.lookup(second)

_revrangesep = ':'

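# Illustrative sketch: resolving user-supplied revisions the way command
# implementations do.  The revision specs are hypothetical.
def _demorevpair(repo):
    ctx = revsingle(repo, 'tip')            # single changectx, default '.'
    node1, node2 = revpair(repo, ['2:5'])   # endpoints of a range, as nodes
    return ctx.rev(), node1, node2
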
def revrange(repo, revs):
    """Return a set of revisions from a list of revision specifications."""

    def revfix(repo, val, defval):
        if not val and val != 0 and defval is not None:
            return defval
        return repo[val].rev()

    seen, l = set(), revset.baseset([])

    revsetaliases = [alias for (alias, _) in
                     repo.ui.configitems("revsetalias")]

    for spec in revs:
        if l and not seen:
            seen = set(l)
        # attempt to parse old-style ranges first to deal with
        # things like old-tag which contain query metacharacters
        try:
            # ... except for revset aliases without arguments. These
            # should be parsed as soon as possible, because they might
            # clash with a hash prefix.
            if spec in revsetaliases:
                raise error.RepoLookupError

            if isinstance(spec, int):
                seen.add(spec)
                l = l + revset.baseset([spec])
                continue

            if _revrangesep in spec:
                start, end = spec.split(_revrangesep, 1)
                if start in revsetaliases or end in revsetaliases:
                    raise error.RepoLookupError

                start = revfix(repo, start, 0)
                end = revfix(repo, end, len(repo) - 1)
                if end == nullrev and start < 0:
                    start = nullrev
                rangeiter = repo.changelog.revs(start, end)
                if not seen and not l:
                    # by far the most common case: revs = ["-1:0"]
                    l = revset.baseset(rangeiter)
                    # defer syncing seen until next iteration
                    continue
                newrevs = set(rangeiter)
                if seen:
                    newrevs.difference_update(seen)
                    seen.update(newrevs)
                else:
                    seen = newrevs
                l = l + revset.baseset(sorted(newrevs, reverse=start > end))
                continue
            elif spec and spec in repo: # single unquoted rev
                rev = revfix(repo, spec, None)
                if rev in seen:
                    continue
                seen.add(rev)
                l = l + revset.baseset([rev])
                continue
        except error.RepoLookupError:
            pass

        # fall through to new-style queries if old-style fails
        m = revset.match(repo.ui, spec, repo)
        if seen or l:
            dl = [r for r in m(repo) if r not in seen]
            l = l + revset.baseset(dl)
            seen.update(dl)
        else:
            l = m(repo)

    return l

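# Illustrative sketch: revrange accepts a mix of old-style ranges, plain
# symbols and revset queries; duplicates across specs are dropped via the
# 'seen' set above.  The specs are hypothetical.
def _demorevrange(repo):
    return revrange(repo, ['0:2', 'tip', 'heads(default)'])
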
def expandpats(pats):
    '''Expand bare globs when running on windows.
    On posix we assume it has already been done by sh.'''
    if not util.expandglobs:
        return list(pats)
    ret = []
    for kindpat in pats:
        kind, pat = matchmod._patsplit(kindpat, None)
        if kind is None:
            try:
                globbed = glob.glob(pat)
            except re.error:
                globbed = [pat]
            if globbed:
                ret.extend(globbed)
                continue
        ret.append(kindpat)
    return ret

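# Illustrative note: with util.expandglobs set (Windows), bare patterns
# are globbed here because cmd.exe does not do it; kind-prefixed patterns
# pass through untouched.  The file names are hypothetical:
#
#   expandpats(['*.py', 'path:README'])  ->  ['a.py', 'b.py', 'path:README']
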
def matchandpats(ctx, pats=[], opts={}, globbed=False, default='relpath'):
    '''Return a matcher and the patterns that were used.
    The matcher will warn about bad matches.'''
    if pats == ("",):
        pats = []
    if not globbed and default == 'relpath':
        pats = expandpats(pats or [])

    m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
                  default)
    def badfn(f, msg):
        ctx.repo().ui.warn("%s: %s\n" % (m.rel(f), msg))
    m.bad = badfn
    if m.always():
        pats = []
    return m, pats

def match(ctx, pats=[], opts={}, globbed=False, default='relpath'):
    '''Return a matcher that will warn about bad matches.'''
    return matchandpats(ctx, pats, opts, globbed, default)[0]

def matchall(repo):
    '''Return a matcher that will efficiently match everything.'''
    return matchmod.always(repo.root, repo.getcwd())

def matchfiles(repo, files):
    '''Return a matcher that will efficiently match exactly these files.'''
    return matchmod.exact(repo.root, repo.getcwd(), files)

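# Illustrative sketch: the three matcher constructors above side by side.
# Patterns and file names are hypothetical; matchers are callable and
# return whether a given file matches.
def _demomatchers(repo):
    m1 = match(repo[None], ['glob:*.c'], {})    # warns about bad patterns
    m2 = matchall(repo)                         # matches every file
    m3 = matchfiles(repo, ['a.c', 'b.c'])       # exactly these files
    return m1('x.c'), m2('anything'), m3('a.c')
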
def addremove(repo, matcher, prefix, opts={}, dry_run=None, similarity=None):
    m = matcher
    if dry_run is None:
        dry_run = opts.get('dry_run')
    if similarity is None:
        similarity = float(opts.get('similarity') or 0)

    ret = 0
    join = lambda f: os.path.join(prefix, f)

    def matchessubrepo(matcher, subpath):
        if matcher.exact(subpath):
            return True
        for f in matcher.files():
            if f.startswith(subpath):
                return True
        return False

    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        if opts.get('subrepos') or matchessubrepo(m, subpath):
            sub = wctx.sub(subpath)
            try:
                submatch = matchmod.narrowmatcher(subpath, m)
                if sub.addremove(submatch, prefix, opts, dry_run, similarity):
                    ret = 1
            except error.LookupError:
                repo.ui.status(_("skipping missing subrepository: %s\n")
                               % join(subpath))

    rejected = []
    origbad = m.bad
    def badfn(f, msg):
        if f in m.files():
            origbad(f, msg)
        rejected.append(f)

    m.bad = badfn
    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)
    m.bad = origbad

    unknownset = set(unknown + forgotten)
    toprint = unknownset.copy()
    toprint.update(deleted)
    for abs in sorted(toprint):
        if repo.ui.verbose or not m.exact(abs):
            if abs in unknownset:
                status = _('adding %s\n') % m.uipath(abs)
            else:
                status = _('removing %s\n') % m.uipath(abs)
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    if not dry_run:
        _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return ret

def marktouched(repo, files, similarity=0.0):
    '''Assert that files have somehow been operated upon. Files are relative
    to the repo root.'''
    m = matchfiles(repo, files)
    rejected = []
    m.bad = lambda x, y: rejected.append(x)

    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)

    if repo.ui.verbose:
        unknownset = set(unknown + forgotten)
        toprint = unknownset.copy()
        toprint.update(deleted)
        for abs in sorted(toprint):
            if abs in unknownset:
                status = _('adding %s\n') % abs
            else:
                status = _('removing %s\n') % abs
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return 0

def _interestingfiles(repo, matcher):
    '''Walk dirstate with matcher, looking for files that addremove would care
    about.

    This is different from dirstate.status because it doesn't care about
    whether files are modified or clean.'''
    added, unknown, deleted, removed, forgotten = [], [], [], [], []
    audit_path = pathutil.pathauditor(repo.root)

    ctx = repo[None]
    dirstate = repo.dirstate
    walkresults = dirstate.walk(matcher, sorted(ctx.substate), True, False,
                                full=False)
    for abs, st in walkresults.iteritems():
        dstate = dirstate[abs]
        if dstate == '?' and audit_path.check(abs):
            unknown.append(abs)
        elif dstate != 'r' and not st:
            deleted.append(abs)
        elif dstate == 'r' and st:
            forgotten.append(abs)
        # for finding renames
        elif dstate == 'r' and not st:
            removed.append(abs)
        elif dstate == 'a':
            added.append(abs)

    return added, unknown, deleted, removed, forgotten

def _findrenames(repo, matcher, added, removed, similarity):
    '''Find renames from removed files to added ones.'''
    renames = {}
    if similarity > 0:
        for old, new, score in similar.findrenames(repo, added, removed,
                                                   similarity):
            if (repo.ui.verbose or not matcher.exact(old)
                or not matcher.exact(new)):
                repo.ui.status(_('recording removal of %s as rename to %s '
                                 '(%d%% similar)\n') %
                               (matcher.rel(old), matcher.rel(new),
                                score * 100))
            renames[new] = old
    return renames

def _markchanges(repo, unknown, deleted, renames):
    '''Marks the files in unknown as added, the files in deleted as removed,
    and the files in renames as copied.'''
    wctx = repo[None]
    wlock = repo.wlock()
    try:
        wctx.forget(deleted)
        wctx.add(unknown)
        for new, old in renames.iteritems():
            wctx.copy(old, new)
    finally:
        wlock.release()

def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    various reasons, it might not end with dst being marked as copied from src.
    """
    origsrc = repo.dirstate.copied(src) or src
    if dst == origsrc: # copying back a copy?
        if repo.dirstate[dst] not in 'mn' and not dryrun:
            repo.dirstate.normallookup(dst)
    else:
        if repo.dirstate[origsrc] == 'a' and origsrc == src:
            if not ui.quiet:
                ui.warn(_("%s has not been committed yet, so no copy "
                          "data will be stored for %s.\n")
                        % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
            if repo.dirstate[dst] in '?r' and not dryrun:
                wctx.add([dst])
        elif not dryrun:
            wctx.copy(origsrc, dst)

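# Illustrative sketch: recording copy intent the way 'hg copy' does.  The
# paths are hypothetical; 'b.txt' ends up marked as copied from 'a.txt'
# unless one of the special cases documented above applies.
def _demodirstatecopy(ui, repo):
    dirstatecopy(ui, repo, repo[None], 'a.txt', 'b.txt')
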
def readrequires(opener, supported):
    '''Reads and parses .hg/requires and checks if all entries found
    are in the list of supported features.'''
    requirements = set(opener.read("requires").splitlines())
    missings = []
    for r in requirements:
        if r not in supported:
            if not r or not r[0].isalnum():
                raise error.RequirementError(_(".hg/requires file is corrupt"))
            missings.append(r)
    missings.sort()
    if missings:
        raise error.RequirementError(
            _("repository requires features unknown to this Mercurial: %s")
            % " ".join(missings),
            hint=_("see http://mercurial.selenic.com/wiki/MissingRequirement"
                   " for more information"))
    return requirements

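# Illustrative note: a repository whose .hg/requires lists an entry the
# running Mercurial does not know fails fast here.  'fancy-feature' is
# hypothetical:
#
#   readrequires(repo.vfs, set(['revlogv1', 'store']))
#   -> RequirementError: repository requires features unknown to this
#      Mercurial: fancy-feature
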
class filecachesubentry(object):
    def __init__(self, path, stat):
        self.path = path
        self.cachestat = None
        self._cacheable = None

        if stat:
            self.cachestat = filecachesubentry.stat(self.path)

            if self.cachestat:
                self._cacheable = self.cachestat.cacheable()
            else:
                # None means we don't know yet
                self._cacheable = None

    def refresh(self):
        if self.cacheable():
            self.cachestat = filecachesubentry.stat(self.path)

    def cacheable(self):
        if self._cacheable is not None:
            return self._cacheable

        # we don't know yet, assume it is for now
        return True

    def changed(self):
        # no point in going further if we can't cache it
        if not self.cacheable():
            return True

        newstat = filecachesubentry.stat(self.path)

        # we may not know if it's cacheable yet, check again now
        if newstat and self._cacheable is None:
            self._cacheable = newstat.cacheable()

            # check again
            if not self._cacheable:
                return True

        if self.cachestat != newstat:
            self.cachestat = newstat
            return True
        else:
            return False

    @staticmethod
    def stat(path):
        try:
            return util.cachestat(path)
        except OSError, e:
            if e.errno != errno.ENOENT:
                raise

class filecacheentry(object):
    def __init__(self, paths, stat=True):
        self._entries = []
        for path in paths:
            self._entries.append(filecachesubentry(path, stat))

    def changed(self):
        '''true if any entry has changed'''
        for entry in self._entries:
            if entry.changed():
                return True
        return False

    def refresh(self):
        for entry in self._entries:
            entry.refresh()

class filecache(object):
    '''A property-like decorator that tracks files under .hg/ for updates.

    Records stat info when called in _filecache.

    On subsequent calls, compares old stat info with new info, and recreates the
    object when any of the files changes, updating the new stat info in
    _filecache.

    Mercurial either atomically renames or appends to files under .hg, so to
    ensure the cache is reliable we need the filesystem to be able to tell us
    if a file has been replaced. If it can't, we fall back to recreating the
    object on every call (essentially the same behaviour as propertycache).

    '''
    def __init__(self, *paths):
        self.paths = paths

    def join(self, obj, fname):
        """Used to compute the runtime path of a cached file.

        Users should subclass filecache and provide their own version of this
        function to call the appropriate join function on 'obj' (an instance
        of the class that its member function was decorated).
        """
        return obj.join(fname)

    def __call__(self, func):
        self.func = func
        self.name = func.__name__
        return self

    def __get__(self, obj, type=None):
        # do we need to check if the file changed?
        if self.name in obj.__dict__:
            assert self.name in obj._filecache, self.name
            return obj.__dict__[self.name]

        entry = obj._filecache.get(self.name)

        if entry:
            if entry.changed():
                entry.obj = self.func(obj)
        else:
            paths = [self.join(obj, path) for path in self.paths]

            # We stat -before- creating the object so our cache doesn't lie if
            # a writer modified between the time we read and stat
            entry = filecacheentry(paths, True)
            entry.obj = self.func(obj)

            obj._filecache[self.name] = entry

        obj.__dict__[self.name] = entry.obj
        return entry.obj

    def __set__(self, obj, value):
        if self.name not in obj._filecache:
            # we add an entry for the missing value because X in __dict__
            # implies X in _filecache
            paths = [self.join(obj, path) for path in self.paths]
            ce = filecacheentry(paths, False)
            obj._filecache[self.name] = ce
        else:
            ce = obj._filecache[self.name]

        ce.obj = value # update cached copy
        obj.__dict__[self.name] = value # update copy returned by obj.x

    def __delete__(self, obj):
        try:
            del obj.__dict__[self.name]
        except KeyError:
            raise AttributeError(self.name)
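
# Illustrative sketch: the filecache decorator in use on a repo-like
# object.  The class below is hypothetical; localrepo wires this up
# analogously with its own filecache subclasses.
class _demofilecached(object):
    def __init__(self, path):
        self._path = path
        self._filecache = {}    # required by filecache.__get__/__set__

    def join(self, fname):
        return os.path.join(self._path, fname)

    @filecache('bookmarks')
    def bookmarks(self):
        # recomputed only when the stat info of <path>/bookmarks changes
        return self.join('bookmarks')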