revbranchcache: move out of branchmap onto localrepo...
Durham Goode
r24373:59cc0924 default
diff --git a/mercurial/branchmap.py b/mercurial/branchmap.py
--- a/mercurial/branchmap.py
+++ b/mercurial/branchmap.py
@@ -1,449 +1,448 @@
 # branchmap.py - logic to computes, maintain and stores branchmap for local repo
 #
 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 from node import bin, hex, nullid, nullrev
 import encoding
 import util
 import time
 from array import array
 from struct import calcsize, pack, unpack

 def _filename(repo):
     """name of a branchcache file for a given repo or repoview"""
     filename = "cache/branch2"
     if repo.filtername:
         filename = '%s-%s' % (filename, repo.filtername)
     return filename

 def read(repo):
     try:
         f = repo.vfs(_filename(repo))
         lines = f.read().split('\n')
         f.close()
     except (IOError, OSError):
         return None

     try:
         cachekey = lines.pop(0).split(" ", 2)
         last, lrev = cachekey[:2]
         last, lrev = bin(last), int(lrev)
         filteredhash = None
         if len(cachekey) > 2:
             filteredhash = bin(cachekey[2])
         partial = branchcache(tipnode=last, tiprev=lrev,
                               filteredhash=filteredhash)
         if not partial.validfor(repo):
             # invalidate the cache
             raise ValueError('tip differs')
         for l in lines:
             if not l:
                 continue
             node, state, label = l.split(" ", 2)
             if state not in 'oc':
                 raise ValueError('invalid branch state')
             label = encoding.tolocal(label.strip())
             if not node in repo:
                 raise ValueError('node %s does not exist' % node)
             node = bin(node)
             partial.setdefault(label, []).append(node)
             if state == 'c':
                 partial._closednodes.add(node)
     except KeyboardInterrupt:
         raise
     except Exception, inst:
         if repo.ui.debugflag:
             msg = 'invalid branchheads cache'
             if repo.filtername is not None:
                 msg += ' (%s)' % repo.filtername
             msg += ': %s\n'
             repo.ui.debug(msg % inst)
         partial = None
     return partial

 ### Nearest subset relation
 # Nearest subset of filter X is a filter Y so that:
 # * Y is included in X,
 # * X - Y is as small as possible.
 # This create and ordering used for branchmap purpose.
 # the ordering may be partial
 subsettable = {None: 'visible',
                'visible': 'served',
                'served': 'immutable',
                'immutable': 'base'}

 def updatecache(repo):
     cl = repo.changelog
     filtername = repo.filtername
     partial = repo._branchcaches.get(filtername)

     revs = []
     if partial is None or not partial.validfor(repo):
         partial = read(repo)
         if partial is None:
             subsetname = subsettable.get(filtername)
             if subsetname is None:
                 partial = branchcache()
             else:
                 subset = repo.filtered(subsetname)
                 partial = subset.branchmap().copy()
                 extrarevs = subset.changelog.filteredrevs - cl.filteredrevs
                 revs.extend(r for r in extrarevs if r <= partial.tiprev)
     revs.extend(cl.revs(start=partial.tiprev + 1))
     if revs:
         partial.update(repo, revs)
         partial.write(repo)
+
+    if repo._revbranchcache is not None:
+        repo._revbranchcache.write(repo)
+
     assert partial.validfor(repo), filtername
     repo._branchcaches[repo.filtername] = partial

 class branchcache(dict):
     """A dict like object that hold branches heads cache.

     This cache is used to avoid costly computations to determine all the
     branch heads of a repo.

     The cache is serialized on disk in the following format:

     <tip hex node> <tip rev number> [optional filtered repo hex hash]
     <branch head hex node> <open/closed state> <branch name>
     <branch head hex node> <open/closed state> <branch name>
     ...

     The first line is used to check if the cache is still valid. If the
     branch cache is for a filtered repo view, an optional third hash is
     included that hashes the hashes of all filtered revisions.

     The open/closed state is represented by a single letter 'o' or 'c'.
     This field can be used to avoid changelog reads when determining if a
     branch head closes a branch or not.
     """

     def __init__(self, entries=(), tipnode=nullid, tiprev=nullrev,
                  filteredhash=None, closednodes=None):
         super(branchcache, self).__init__(entries)
         self.tipnode = tipnode
         self.tiprev = tiprev
         self.filteredhash = filteredhash
         # closednodes is a set of nodes that close their branch. If the branch
         # cache has been updated, it may contain nodes that are no longer
         # heads.
         if closednodes is None:
             self._closednodes = set()
         else:
             self._closednodes = closednodes
-        self._revbranchcache = None

     def _hashfiltered(self, repo):
         """build hash of revision filtered in the current cache

         Tracking tipnode and tiprev is not enough to ensure validity of the
         cache as they do not help to distinct cache that ignored various
         revision bellow tiprev.

         To detect such difference, we build a cache of all ignored revisions.
         """
         cl = repo.changelog
         if not cl.filteredrevs:
             return None
         key = None
         revs = sorted(r for r in cl.filteredrevs if r <= self.tiprev)
         if revs:
             s = util.sha1()
             for rev in revs:
                 s.update('%s;' % rev)
             key = s.digest()
         return key

     def validfor(self, repo):
         """Is the cache content valid regarding a repo

         - False when cached tipnode is unknown or if we detect a strip.
         - True when cache is up to date or a subset of current repo."""
         try:
             return ((self.tipnode == repo.changelog.node(self.tiprev))
                     and (self.filteredhash == self._hashfiltered(repo)))
         except IndexError:
             return False

     def _branchtip(self, heads):
         '''Return tuple with last open head in heads and false,
         otherwise return last closed head and true.'''
         tip = heads[-1]
         closed = True
         for h in reversed(heads):
             if h not in self._closednodes:
                 tip = h
                 closed = False
                 break
         return tip, closed

     def branchtip(self, branch):
         '''Return the tipmost open head on branch head, otherwise return the
         tipmost closed head on branch.
         Raise KeyError for unknown branch.'''
         return self._branchtip(self[branch])[0]

     def branchheads(self, branch, closed=False):
         heads = self[branch]
         if not closed:
             heads = [h for h in heads if h not in self._closednodes]
         return heads

     def iterbranches(self):
         for bn, heads in self.iteritems():
             yield (bn, heads) + self._branchtip(heads)

     def copy(self):
         """return an deep copy of the branchcache object"""
         return branchcache(self, self.tipnode, self.tiprev, self.filteredhash,
                            self._closednodes)

     def write(self, repo):
         try:
             f = repo.vfs(_filename(repo), "w", atomictemp=True)
             cachekey = [hex(self.tipnode), str(self.tiprev)]
             if self.filteredhash is not None:
                 cachekey.append(hex(self.filteredhash))
             f.write(" ".join(cachekey) + '\n')
             nodecount = 0
             for label, nodes in sorted(self.iteritems()):
                 for node in nodes:
                     nodecount += 1
                     if node in self._closednodes:
                         state = 'c'
                     else:
                         state = 'o'
                     f.write("%s %s %s\n" % (hex(node), state,
                                             encoding.fromlocal(label)))
             f.close()
             repo.ui.log('branchcache',
                         'wrote %s branch cache with %d labels and %d nodes\n',
                         repo.filtername, len(self), nodecount)
         except (IOError, OSError, util.Abort), inst:
             repo.ui.debug("couldn't write branch cache: %s\n" % inst)
             # Abort may be raise by read only opener
             pass
-        if self._revbranchcache:
-            self._revbranchcache.write(repo.unfiltered())
-            self._revbranchcache = None

     def update(self, repo, revgen):
         """Given a branchhead cache, self, that may have extra nodes or be
         missing heads, and a generator of nodes that are strictly a superset of
         heads missing, this function updates self to be correct.
         """
         starttime = time.time()
         cl = repo.changelog
         # collect new branch entries
         newbranches = {}
         urepo = repo.unfiltered()
-        self._revbranchcache = revbranchcache(urepo)
-        getbranchinfo = self._revbranchcache.branchinfo
         ucl = urepo.changelog
+        getbranchinfo = repo.revbranchcache().branchinfo
         for r in revgen:
             branch, closesbranch = getbranchinfo(ucl, r)
             newbranches.setdefault(branch, []).append(r)
             if closesbranch:
                 self._closednodes.add(cl.node(r))

         # fetch current topological heads to speed up filtering
         topoheads = set(cl.headrevs())

         # if older branchheads are reachable from new ones, they aren't
         # really branchheads. Note checking parents is insufficient:
         # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
         for branch, newheadrevs in newbranches.iteritems():
             bheads = self.setdefault(branch, [])
             bheadset = set(cl.rev(node) for node in bheads)

             # This have been tested True on all internal usage of this function.
             # run it again in case of doubt
             # assert not (set(bheadrevs) & set(newheadrevs))
             newheadrevs.sort()
             bheadset.update(newheadrevs)

             # This prunes out two kinds of heads - heads that are superseded by
             # a head in newheadrevs, and newheadrevs that are not heads because
             # an existing head is their descendant.
             uncertain = bheadset - topoheads
             if uncertain:
                 floorrev = min(uncertain)
                 ancestors = set(cl.ancestors(newheadrevs, floorrev))
                 bheadset -= ancestors
             bheadrevs = sorted(bheadset)
             self[branch] = [cl.node(rev) for rev in bheadrevs]
             tiprev = bheadrevs[-1]
             if tiprev > self.tiprev:
                 self.tipnode = cl.node(tiprev)
                 self.tiprev = tiprev

         if not self.validfor(repo):
             # cache key are not valid anymore
             self.tipnode = nullid
             self.tiprev = nullrev
             for heads in self.values():
                 tiprev = max(cl.rev(node) for node in heads)
                 if tiprev > self.tiprev:
                     self.tipnode = cl.node(tiprev)
                     self.tiprev = tiprev
         self.filteredhash = self._hashfiltered(repo)

         duration = time.time() - starttime
         repo.ui.log('branchcache', 'updated %s branch cache in %.4f seconds\n',
                     repo.filtername, duration)

 # Revision branch info cache

 _rbcversion = '-v1'
 _rbcnames = 'cache/rbc-names' + _rbcversion
 _rbcrevs = 'cache/rbc-revs' + _rbcversion
 # [4 byte hash prefix][4 byte branch name number with sign bit indicating open]
 _rbcrecfmt = '>4sI'
 _rbcrecsize = calcsize(_rbcrecfmt)
 _rbcnodelen = 4
 _rbcbranchidxmask = 0x7fffffff
 _rbccloseflag = 0x80000000

 class revbranchcache(object):
     """Persistent cache, mapping from revision number to branch name and close.
     This is a low level cache, independent of filtering.

     Branch names are stored in rbc-names in internal encoding separated by 0.
     rbc-names is append-only, and each branch name is only stored once and will
     thus have a unique index.

     The branch info for each revision is stored in rbc-revs as constant size
     records. The whole file is read into memory, but it is only 'parsed' on
     demand. The file is usually append-only but will be truncated if repo
     modification is detected.
     The record for each revision contains the first 4 bytes of the
     corresponding node hash, and the record is only used if it still matches.
     Even a completely trashed rbc-revs fill thus still give the right result
     while converging towards full recovery ... assuming no incorrectly matching
     node hashes.
     The record also contains 4 bytes where 31 bits contains the index of the
     branch and the last bit indicate that it is a branch close commit.
     The usage pattern for rbc-revs is thus somewhat similar to 00changelog.i
     and will grow with it but be 1/8th of its size.
     """

     def __init__(self, repo, readonly=True):
         assert repo.filtername is None
         self._names = [] # branch names in local encoding with static index
         self._rbcrevs = array('c') # structs of type _rbcrecfmt
         self._rbcsnameslen = 0
         try:
             bndata = repo.vfs.read(_rbcnames)
             self._rbcsnameslen = len(bndata) # for verification before writing
             self._names = [encoding.tolocal(bn) for bn in bndata.split('\0')]
         except (IOError, OSError), inst:
             repo.ui.debug("couldn't read revision branch cache names: %s\n" %
                           inst)
             if readonly:
                 # don't try to use cache - fall back to the slow path
                 self.branchinfo = self._branchinfo

         if self._names:
             try:
                 data = repo.vfs.read(_rbcrevs)
                 self._rbcrevs.fromstring(data)
             except (IOError, OSError), inst:
                 repo.ui.debug("couldn't read revision branch cache: %s\n" %
                               inst)
         # remember number of good records on disk
         self._rbcrevslen = min(len(self._rbcrevs) // _rbcrecsize,
                                len(repo.changelog))
         if self._rbcrevslen == 0:
             self._names = []
         self._rbcnamescount = len(self._names) # number of good names on disk
         self._namesreverse = dict((b, r) for r, b in enumerate(self._names))

     def branchinfo(self, changelog, rev):
         """Return branch name and close flag for rev, using and updating
         persistent cache."""
         rbcrevidx = rev * _rbcrecsize

         # if requested rev is missing, add and populate all missing revs
         if len(self._rbcrevs) < rbcrevidx + _rbcrecsize:
             first = len(self._rbcrevs) // _rbcrecsize
             self._rbcrevs.extend('\0' * (len(changelog) * _rbcrecsize -
                                          len(self._rbcrevs)))
             for r in xrange(first, len(changelog)):
                 self._branchinfo(changelog, r)

         # fast path: extract data from cache, use it if node is matching
         reponode = changelog.node(rev)[:_rbcnodelen]
         cachenode, branchidx = unpack(
             _rbcrecfmt, buffer(self._rbcrevs, rbcrevidx, _rbcrecsize))
         close = bool(branchidx & _rbccloseflag)
         if close:
             branchidx &= _rbcbranchidxmask
         if cachenode == reponode:
             return self._names[branchidx], close
         # fall back to slow path and make sure it will be written to disk
         self._rbcrevslen = min(self._rbcrevslen, rev)
         return self._branchinfo(changelog, rev)

     def _branchinfo(self, changelog, rev):
         """Retrieve branch info from changelog and update _rbcrevs"""
         b, close = changelog.branchinfo(rev)
         if b in self._namesreverse:
             branchidx = self._namesreverse[b]
         else:
             branchidx = len(self._names)
             self._names.append(b)
             self._namesreverse[b] = branchidx
         reponode = changelog.node(rev)
         if close:
             branchidx |= _rbccloseflag
         rbcrevidx = rev * _rbcrecsize
         rec = array('c')
         rec.fromstring(pack(_rbcrecfmt, reponode, branchidx))
         self._rbcrevs[rbcrevidx:rbcrevidx + _rbcrecsize] = rec
         return b, close

     def write(self, repo):
         """Save branch cache if it is dirty."""
         if self._rbcnamescount < len(self._names):
             try:
                 if self._rbcnamescount != 0:
                     f = repo.vfs.open(_rbcnames, 'ab')
                     if f.tell() == self._rbcsnameslen:
                         f.write('\0')
                     else:
                         f.close()
                         repo.ui.debug("%s changed - rewriting it\n" % _rbcnames)
                         self._rbcnamescount = 0
                         self._rbcrevslen = 0
                 if self._rbcnamescount == 0:
                     f = repo.vfs.open(_rbcnames, 'wb')
                 f.write('\0'.join(encoding.fromlocal(b)
                                   for b in self._names[self._rbcnamescount:]))
                 self._rbcsnameslen = f.tell()
                 f.close()
             except (IOError, OSError, util.Abort), inst:
                 repo.ui.debug("couldn't write revision branch cache names: "
                               "%s\n" % inst)
                 return
             self._rbcnamescount = len(self._names)

         start = self._rbcrevslen * _rbcrecsize
         if start != len(self._rbcrevs):
             revs = min(len(repo.changelog), len(self._rbcrevs) // _rbcrecsize)
             try:
                 f = repo.vfs.open(_rbcrevs, 'ab')
                 if f.tell() != start:
                     repo.ui.debug("truncating %s to %s\n" % (_rbcrevs, start))
                     f.seek(start)
                     f.truncate()
                 end = revs * _rbcrecsize
                 f.write(self._rbcrevs[start:end])
                 f.close()
             except (IOError, OSError, util.Abort), inst:
                 repo.ui.debug("couldn't write revision branch cache: %s\n" %
                               inst)
                 return
             self._rbcrevslen = revs
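
The two on-disk formats described in the hunk above are easy to illustrate. A cache/branch2 file, as written by branchcache.write, is plain text; a made-up example with one open and one closed branch head (every hash below is invented):

    1bb34abe76f2a53213fdf76780b4df32a77d4952 1024
    9f8a3b64cc90d2b830b9a5e223b1c7a41f0de812 o default
    3c2d1e0f9a8b7c6d5e4f3a2b1c0d9e8f7a6b5c4d c stable

The 8-byte rbc-revs record layout from the revbranchcache docstring can likewise be exercised in isolation. A minimal Python 2 sketch mirroring the module's constants (the sample node prefix is also invented):

    from struct import calcsize, pack, unpack

    _rbcrecfmt = '>4sI'                 # 4-byte node prefix + big-endian uint32
    _rbcrecsize = calcsize(_rbcrecfmt)  # 8 bytes per revision
    _rbccloseflag = 0x80000000          # top bit set: commit closes its branch
    _rbcbranchidxmask = 0x7fffffff      # low 31 bits: index into rbc-names

    def encode(nodeprefix, branchidx, close):
        # fold the close flag into the high bit of the branch name index
        if close:
            branchidx |= _rbccloseflag
        return pack(_rbcrecfmt, nodeprefix, branchidx)

    def decode(rec):
        # split a record back into (node prefix, branch index, close flag)
        nodeprefix, branchidx = unpack(_rbcrecfmt, rec)
        close = bool(branchidx & _rbccloseflag)
        return nodeprefix, branchidx & _rbcbranchidxmask, close

    rec = encode('\x1b\xb3\x4a\xbe', 5, True)  # invented node prefix
    assert len(rec) == _rbcrecsize
    assert decode(rec) == ('\x1b\xb3\x4a\xbe', 5, True)

This is also why a corrupted rbc-revs file degrades gracefully: a record is only trusted when its stored node prefix still matches the changelog node, and any mismatch falls back to the slow path, which recomputes the entry and marks it for rewriting.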
@@ -1,1887 +1,1894 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 from node import hex, nullid, short
7 from node import hex, nullid, short
8 from i18n import _
8 from i18n import _
9 import urllib
9 import urllib
10 import peer, changegroup, subrepo, pushkey, obsolete, repoview
10 import peer, changegroup, subrepo, pushkey, obsolete, repoview
11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
12 import lock as lockmod
12 import lock as lockmod
13 import transaction, store, encoding, exchange, bundle2
13 import transaction, store, encoding, exchange, bundle2
14 import scmutil, util, extensions, hook, error, revset
14 import scmutil, util, extensions, hook, error, revset
15 import match as matchmod
15 import match as matchmod
16 import merge as mergemod
16 import merge as mergemod
17 import tags as tagsmod
17 import tags as tagsmod
18 from lock import release
18 from lock import release
19 import weakref, errno, os, time, inspect
19 import weakref, errno, os, time, inspect
20 import branchmap, pathutil
20 import branchmap, pathutil
21 import namespaces
21 import namespaces
22 propertycache = util.propertycache
22 propertycache = util.propertycache
23 filecache = scmutil.filecache
23 filecache = scmutil.filecache
24
24
25 class repofilecache(filecache):
25 class repofilecache(filecache):
26 """All filecache usage on repo are done for logic that should be unfiltered
26 """All filecache usage on repo are done for logic that should be unfiltered
27 """
27 """
28
28
29 def __get__(self, repo, type=None):
29 def __get__(self, repo, type=None):
30 return super(repofilecache, self).__get__(repo.unfiltered(), type)
30 return super(repofilecache, self).__get__(repo.unfiltered(), type)
31 def __set__(self, repo, value):
31 def __set__(self, repo, value):
32 return super(repofilecache, self).__set__(repo.unfiltered(), value)
32 return super(repofilecache, self).__set__(repo.unfiltered(), value)
33 def __delete__(self, repo):
33 def __delete__(self, repo):
34 return super(repofilecache, self).__delete__(repo.unfiltered())
34 return super(repofilecache, self).__delete__(repo.unfiltered())
35
35
36 class storecache(repofilecache):
36 class storecache(repofilecache):
37 """filecache for files in the store"""
37 """filecache for files in the store"""
38 def join(self, obj, fname):
38 def join(self, obj, fname):
39 return obj.sjoin(fname)
39 return obj.sjoin(fname)
40
40
41 class unfilteredpropertycache(propertycache):
41 class unfilteredpropertycache(propertycache):
42 """propertycache that apply to unfiltered repo only"""
42 """propertycache that apply to unfiltered repo only"""
43
43
44 def __get__(self, repo, type=None):
44 def __get__(self, repo, type=None):
45 unfi = repo.unfiltered()
45 unfi = repo.unfiltered()
46 if unfi is repo:
46 if unfi is repo:
47 return super(unfilteredpropertycache, self).__get__(unfi)
47 return super(unfilteredpropertycache, self).__get__(unfi)
48 return getattr(unfi, self.name)
48 return getattr(unfi, self.name)
49
49
50 class filteredpropertycache(propertycache):
50 class filteredpropertycache(propertycache):
51 """propertycache that must take filtering in account"""
51 """propertycache that must take filtering in account"""
52
52
53 def cachevalue(self, obj, value):
53 def cachevalue(self, obj, value):
54 object.__setattr__(obj, self.name, value)
54 object.__setattr__(obj, self.name, value)
55
55
56
56
57 def hasunfilteredcache(repo, name):
57 def hasunfilteredcache(repo, name):
58 """check if a repo has an unfilteredpropertycache value for <name>"""
58 """check if a repo has an unfilteredpropertycache value for <name>"""
59 return name in vars(repo.unfiltered())
59 return name in vars(repo.unfiltered())
60
60
61 def unfilteredmethod(orig):
61 def unfilteredmethod(orig):
62 """decorate method that always need to be run on unfiltered version"""
62 """decorate method that always need to be run on unfiltered version"""
63 def wrapper(repo, *args, **kwargs):
63 def wrapper(repo, *args, **kwargs):
64 return orig(repo.unfiltered(), *args, **kwargs)
64 return orig(repo.unfiltered(), *args, **kwargs)
65 return wrapper
65 return wrapper
66
66
67 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
67 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
68 'unbundle'))
68 'unbundle'))
69 legacycaps = moderncaps.union(set(['changegroupsubset']))
69 legacycaps = moderncaps.union(set(['changegroupsubset']))
70
70
71 class localpeer(peer.peerrepository):
71 class localpeer(peer.peerrepository):
72 '''peer for a local repo; reflects only the most recent API'''
72 '''peer for a local repo; reflects only the most recent API'''
73
73
74 def __init__(self, repo, caps=moderncaps):
74 def __init__(self, repo, caps=moderncaps):
75 peer.peerrepository.__init__(self)
75 peer.peerrepository.__init__(self)
76 self._repo = repo.filtered('served')
76 self._repo = repo.filtered('served')
77 self.ui = repo.ui
77 self.ui = repo.ui
78 self._caps = repo._restrictcapabilities(caps)
78 self._caps = repo._restrictcapabilities(caps)
79 self.requirements = repo.requirements
79 self.requirements = repo.requirements
80 self.supportedformats = repo.supportedformats
80 self.supportedformats = repo.supportedformats
81
81
82 def close(self):
82 def close(self):
83 self._repo.close()
83 self._repo.close()
84
84
85 def _capabilities(self):
85 def _capabilities(self):
86 return self._caps
86 return self._caps
87
87
88 def local(self):
88 def local(self):
89 return self._repo
89 return self._repo
90
90
91 def canpush(self):
91 def canpush(self):
92 return True
92 return True
93
93
94 def url(self):
94 def url(self):
95 return self._repo.url()
95 return self._repo.url()
96
96
97 def lookup(self, key):
97 def lookup(self, key):
98 return self._repo.lookup(key)
98 return self._repo.lookup(key)
99
99
100 def branchmap(self):
100 def branchmap(self):
101 return self._repo.branchmap()
101 return self._repo.branchmap()
102
102
103 def heads(self):
103 def heads(self):
104 return self._repo.heads()
104 return self._repo.heads()
105
105
106 def known(self, nodes):
106 def known(self, nodes):
107 return self._repo.known(nodes)
107 return self._repo.known(nodes)
108
108
109 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
109 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
110 format='HG10', **kwargs):
110 format='HG10', **kwargs):
111 cg = exchange.getbundle(self._repo, source, heads=heads,
111 cg = exchange.getbundle(self._repo, source, heads=heads,
112 common=common, bundlecaps=bundlecaps, **kwargs)
112 common=common, bundlecaps=bundlecaps, **kwargs)
113 if bundlecaps is not None and 'HG2Y' in bundlecaps:
113 if bundlecaps is not None and 'HG2Y' in bundlecaps:
114 # When requesting a bundle2, getbundle returns a stream to make the
114 # When requesting a bundle2, getbundle returns a stream to make the
115 # wire level function happier. We need to build a proper object
115 # wire level function happier. We need to build a proper object
116 # from it in local peer.
116 # from it in local peer.
117 cg = bundle2.unbundle20(self.ui, cg)
117 cg = bundle2.unbundle20(self.ui, cg)
118 return cg
118 return cg
119
119
120 # TODO We might want to move the next two calls into legacypeer and add
120 # TODO We might want to move the next two calls into legacypeer and add
121 # unbundle instead.
121 # unbundle instead.
122
122
123 def unbundle(self, cg, heads, url):
123 def unbundle(self, cg, heads, url):
124 """apply a bundle on a repo
124 """apply a bundle on a repo
125
125
126 This function handles the repo locking itself."""
126 This function handles the repo locking itself."""
127 try:
127 try:
128 cg = exchange.readbundle(self.ui, cg, None)
128 cg = exchange.readbundle(self.ui, cg, None)
129 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
129 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
130 if util.safehasattr(ret, 'getchunks'):
130 if util.safehasattr(ret, 'getchunks'):
131 # This is a bundle20 object, turn it into an unbundler.
131 # This is a bundle20 object, turn it into an unbundler.
132 # This little dance should be dropped eventually when the API
132 # This little dance should be dropped eventually when the API
133 # is finally improved.
133 # is finally improved.
134 stream = util.chunkbuffer(ret.getchunks())
134 stream = util.chunkbuffer(ret.getchunks())
135 ret = bundle2.unbundle20(self.ui, stream)
135 ret = bundle2.unbundle20(self.ui, stream)
136 return ret
136 return ret
137 except error.PushRaced, exc:
137 except error.PushRaced, exc:
138 raise error.ResponseError(_('push failed:'), str(exc))
138 raise error.ResponseError(_('push failed:'), str(exc))
139
139
140 def lock(self):
140 def lock(self):
141 return self._repo.lock()
141 return self._repo.lock()
142
142
143 def addchangegroup(self, cg, source, url):
143 def addchangegroup(self, cg, source, url):
144 return changegroup.addchangegroup(self._repo, cg, source, url)
144 return changegroup.addchangegroup(self._repo, cg, source, url)
145
145
146 def pushkey(self, namespace, key, old, new):
146 def pushkey(self, namespace, key, old, new):
147 return self._repo.pushkey(namespace, key, old, new)
147 return self._repo.pushkey(namespace, key, old, new)
148
148
149 def listkeys(self, namespace):
149 def listkeys(self, namespace):
150 return self._repo.listkeys(namespace)
150 return self._repo.listkeys(namespace)
151
151
152 def debugwireargs(self, one, two, three=None, four=None, five=None):
152 def debugwireargs(self, one, two, three=None, four=None, five=None):
153 '''used to test argument passing over the wire'''
153 '''used to test argument passing over the wire'''
154 return "%s %s %s %s %s" % (one, two, three, four, five)
154 return "%s %s %s %s %s" % (one, two, three, four, five)
155
155
156 class locallegacypeer(localpeer):
156 class locallegacypeer(localpeer):
157 '''peer extension which implements legacy methods too; used for tests with
157 '''peer extension which implements legacy methods too; used for tests with
158 restricted capabilities'''
158 restricted capabilities'''
159
159
160 def __init__(self, repo):
160 def __init__(self, repo):
161 localpeer.__init__(self, repo, caps=legacycaps)
161 localpeer.__init__(self, repo, caps=legacycaps)
162
162
163 def branches(self, nodes):
163 def branches(self, nodes):
164 return self._repo.branches(nodes)
164 return self._repo.branches(nodes)
165
165
166 def between(self, pairs):
166 def between(self, pairs):
167 return self._repo.between(pairs)
167 return self._repo.between(pairs)
168
168
169 def changegroup(self, basenodes, source):
169 def changegroup(self, basenodes, source):
170 return changegroup.changegroup(self._repo, basenodes, source)
170 return changegroup.changegroup(self._repo, basenodes, source)
171
171
172 def changegroupsubset(self, bases, heads, source):
172 def changegroupsubset(self, bases, heads, source):
173 return changegroup.changegroupsubset(self._repo, bases, heads, source)
173 return changegroup.changegroupsubset(self._repo, bases, heads, source)
174
174
175 class localrepository(object):
175 class localrepository(object):
176
176
177 supportedformats = set(('revlogv1', 'generaldelta'))
177 supportedformats = set(('revlogv1', 'generaldelta'))
178 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
178 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
179 'dotencode'))
179 'dotencode'))
180 openerreqs = set(('revlogv1', 'generaldelta'))
180 openerreqs = set(('revlogv1', 'generaldelta'))
181 requirements = ['revlogv1']
181 requirements = ['revlogv1']
182 filtername = None
182 filtername = None
183
183
184 # a list of (ui, featureset) functions.
184 # a list of (ui, featureset) functions.
185 # only functions defined in module of enabled extensions are invoked
185 # only functions defined in module of enabled extensions are invoked
186 featuresetupfuncs = set()
186 featuresetupfuncs = set()
187
187
188 def _baserequirements(self, create):
188 def _baserequirements(self, create):
189 return self.requirements[:]
189 return self.requirements[:]
190
190
191 def __init__(self, baseui, path=None, create=False):
191 def __init__(self, baseui, path=None, create=False):
192 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
192 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
193 self.wopener = self.wvfs
193 self.wopener = self.wvfs
194 self.root = self.wvfs.base
194 self.root = self.wvfs.base
195 self.path = self.wvfs.join(".hg")
195 self.path = self.wvfs.join(".hg")
196 self.origroot = path
196 self.origroot = path
197 self.auditor = pathutil.pathauditor(self.root, self._checknested)
197 self.auditor = pathutil.pathauditor(self.root, self._checknested)
198 self.vfs = scmutil.vfs(self.path)
198 self.vfs = scmutil.vfs(self.path)
199 self.opener = self.vfs
199 self.opener = self.vfs
200 self.baseui = baseui
200 self.baseui = baseui
201 self.ui = baseui.copy()
201 self.ui = baseui.copy()
202 self.ui.copy = baseui.copy # prevent copying repo configuration
202 self.ui.copy = baseui.copy # prevent copying repo configuration
203 # A list of callback to shape the phase if no data were found.
203 # A list of callback to shape the phase if no data were found.
204 # Callback are in the form: func(repo, roots) --> processed root.
204 # Callback are in the form: func(repo, roots) --> processed root.
205 # This list it to be filled by extension during repo setup
205 # This list it to be filled by extension during repo setup
206 self._phasedefaults = []
206 self._phasedefaults = []
207 try:
207 try:
208 self.ui.readconfig(self.join("hgrc"), self.root)
208 self.ui.readconfig(self.join("hgrc"), self.root)
209 extensions.loadall(self.ui)
209 extensions.loadall(self.ui)
210 except IOError:
210 except IOError:
211 pass
211 pass
212
212
213 if self.featuresetupfuncs:
213 if self.featuresetupfuncs:
214 self.supported = set(self._basesupported) # use private copy
214 self.supported = set(self._basesupported) # use private copy
215 extmods = set(m.__name__ for n, m
215 extmods = set(m.__name__ for n, m
216 in extensions.extensions(self.ui))
216 in extensions.extensions(self.ui))
217 for setupfunc in self.featuresetupfuncs:
217 for setupfunc in self.featuresetupfuncs:
218 if setupfunc.__module__ in extmods:
218 if setupfunc.__module__ in extmods:
219 setupfunc(self.ui, self.supported)
219 setupfunc(self.ui, self.supported)
220 else:
220 else:
221 self.supported = self._basesupported
221 self.supported = self._basesupported
222
222
223 if not self.vfs.isdir():
223 if not self.vfs.isdir():
224 if create:
224 if create:
225 if not self.wvfs.exists():
225 if not self.wvfs.exists():
226 self.wvfs.makedirs()
226 self.wvfs.makedirs()
227 self.vfs.makedir(notindexed=True)
227 self.vfs.makedir(notindexed=True)
228 requirements = self._baserequirements(create)
228 requirements = self._baserequirements(create)
229 if self.ui.configbool('format', 'usestore', True):
229 if self.ui.configbool('format', 'usestore', True):
230 self.vfs.mkdir("store")
230 self.vfs.mkdir("store")
231 requirements.append("store")
231 requirements.append("store")
232 if self.ui.configbool('format', 'usefncache', True):
232 if self.ui.configbool('format', 'usefncache', True):
233 requirements.append("fncache")
233 requirements.append("fncache")
234 if self.ui.configbool('format', 'dotencode', True):
234 if self.ui.configbool('format', 'dotencode', True):
235 requirements.append('dotencode')
235 requirements.append('dotencode')
236 # create an invalid changelog
236 # create an invalid changelog
237 self.vfs.append(
237 self.vfs.append(
238 "00changelog.i",
238 "00changelog.i",
239 '\0\0\0\2' # represents revlogv2
239 '\0\0\0\2' # represents revlogv2
240 ' dummy changelog to prevent using the old repo layout'
240 ' dummy changelog to prevent using the old repo layout'
241 )
241 )
242 if self.ui.configbool('format', 'generaldelta', False):
242 if self.ui.configbool('format', 'generaldelta', False):
243 requirements.append("generaldelta")
243 requirements.append("generaldelta")
244 requirements = set(requirements)
244 requirements = set(requirements)
245 else:
245 else:
246 raise error.RepoError(_("repository %s not found") % path)
246 raise error.RepoError(_("repository %s not found") % path)
247 elif create:
247 elif create:
248 raise error.RepoError(_("repository %s already exists") % path)
248 raise error.RepoError(_("repository %s already exists") % path)
249 else:
249 else:
250 try:
250 try:
251 requirements = scmutil.readrequires(self.vfs, self.supported)
251 requirements = scmutil.readrequires(self.vfs, self.supported)
252 except IOError, inst:
252 except IOError, inst:
253 if inst.errno != errno.ENOENT:
253 if inst.errno != errno.ENOENT:
254 raise
254 raise
255 requirements = set()
255 requirements = set()
256
256
257 self.sharedpath = self.path
257 self.sharedpath = self.path
258 try:
258 try:
259 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
259 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
260 realpath=True)
260 realpath=True)
261 s = vfs.base
261 s = vfs.base
262 if not vfs.exists():
262 if not vfs.exists():
263 raise error.RepoError(
263 raise error.RepoError(
264 _('.hg/sharedpath points to nonexistent directory %s') % s)
264 _('.hg/sharedpath points to nonexistent directory %s') % s)
265 self.sharedpath = s
265 self.sharedpath = s
266 except IOError, inst:
266 except IOError, inst:
267 if inst.errno != errno.ENOENT:
267 if inst.errno != errno.ENOENT:
268 raise
268 raise
269
269
270 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
270 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
271 self.spath = self.store.path
271 self.spath = self.store.path
272 self.svfs = self.store.vfs
272 self.svfs = self.store.vfs
273 self.sopener = self.svfs
273 self.sopener = self.svfs
274 self.sjoin = self.store.join
274 self.sjoin = self.store.join
275 self.vfs.createmode = self.store.createmode
275 self.vfs.createmode = self.store.createmode
276 self._applyrequirements(requirements)
276 self._applyrequirements(requirements)
277 if create:
277 if create:
278 self._writerequirements()
278 self._writerequirements()
279
279
280
280
281 self._branchcaches = {}
281 self._branchcaches = {}
282 self._revbranchcache = None
282 self.filterpats = {}
283 self.filterpats = {}
283 self._datafilters = {}
284 self._datafilters = {}
284 self._transref = self._lockref = self._wlockref = None
285 self._transref = self._lockref = self._wlockref = None
285
286
286 # A cache for various files under .hg/ that tracks file changes,
287 # A cache for various files under .hg/ that tracks file changes,
287 # (used by the filecache decorator)
288 # (used by the filecache decorator)
288 #
289 #
289 # Maps a property name to its util.filecacheentry
290 # Maps a property name to its util.filecacheentry
290 self._filecache = {}
291 self._filecache = {}
291
292
292 # hold sets of revision to be filtered
293 # hold sets of revision to be filtered
293 # should be cleared when something might have changed the filter value:
294 # should be cleared when something might have changed the filter value:
294 # - new changesets,
295 # - new changesets,
295 # - phase change,
296 # - phase change,
296 # - new obsolescence marker,
297 # - new obsolescence marker,
297 # - working directory parent change,
298 # - working directory parent change,
298 # - bookmark changes
299 # - bookmark changes
299 self.filteredrevcache = {}
300 self.filteredrevcache = {}
300
301
301 # generic mapping between names and nodes
302 # generic mapping between names and nodes
302 self.names = namespaces.namespaces()
303 self.names = namespaces.namespaces()
303
304
304 def close(self):
305 def close(self):
305 pass
306 pass
306
307
307 def _restrictcapabilities(self, caps):
308 def _restrictcapabilities(self, caps):
308 # bundle2 is not ready for prime time, drop it unless explicitly
309 # bundle2 is not ready for prime time, drop it unless explicitly
309 # required by the tests (or some brave tester)
310 # required by the tests (or some brave tester)
310 if self.ui.configbool('experimental', 'bundle2-exp', False):
311 if self.ui.configbool('experimental', 'bundle2-exp', False):
311 caps = set(caps)
312 caps = set(caps)
312 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
313 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
313 caps.add('bundle2-exp=' + urllib.quote(capsblob))
314 caps.add('bundle2-exp=' + urllib.quote(capsblob))
314 return caps
315 return caps
315
316
316 def _applyrequirements(self, requirements):
317 def _applyrequirements(self, requirements):
317 self.requirements = requirements
318 self.requirements = requirements
318 self.svfs.options = dict((r, 1) for r in requirements
319 self.svfs.options = dict((r, 1) for r in requirements
319 if r in self.openerreqs)
320 if r in self.openerreqs)
320 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
321 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
321 if chunkcachesize is not None:
322 if chunkcachesize is not None:
322 self.svfs.options['chunkcachesize'] = chunkcachesize
323 self.svfs.options['chunkcachesize'] = chunkcachesize
323 maxchainlen = self.ui.configint('format', 'maxchainlen')
324 maxchainlen = self.ui.configint('format', 'maxchainlen')
324 if maxchainlen is not None:
325 if maxchainlen is not None:
325 self.svfs.options['maxchainlen'] = maxchainlen
326 self.svfs.options['maxchainlen'] = maxchainlen
326 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
327 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
327 if manifestcachesize is not None:
328 if manifestcachesize is not None:
328 self.svfs.options['manifestcachesize'] = manifestcachesize
329 self.svfs.options['manifestcachesize'] = manifestcachesize
329
330
330 def _writerequirements(self):
331 def _writerequirements(self):
331 reqfile = self.vfs("requires", "w")
332 reqfile = self.vfs("requires", "w")
332 for r in sorted(self.requirements):
333 for r in sorted(self.requirements):
333 reqfile.write("%s\n" % r)
334 reqfile.write("%s\n" % r)
334 reqfile.close()
335 reqfile.close()
335
336
336 def _checknested(self, path):
337 def _checknested(self, path):
337 """Determine if path is a legal nested repository."""
338 """Determine if path is a legal nested repository."""
338 if not path.startswith(self.root):
339 if not path.startswith(self.root):
339 return False
340 return False
340 subpath = path[len(self.root) + 1:]
341 subpath = path[len(self.root) + 1:]
341 normsubpath = util.pconvert(subpath)
342 normsubpath = util.pconvert(subpath)
342
343
343 # XXX: Checking against the current working copy is wrong in
344 # XXX: Checking against the current working copy is wrong in
344 # the sense that it can reject things like
345 # the sense that it can reject things like
345 #
346 #
346 # $ hg cat -r 10 sub/x.txt
347 # $ hg cat -r 10 sub/x.txt
347 #
348 #
348 # if sub/ is no longer a subrepository in the working copy
349 # if sub/ is no longer a subrepository in the working copy
349 # parent revision.
350 # parent revision.
350 #
351 #
351 # However, it can of course also allow things that would have
352 # However, it can of course also allow things that would have
352 # been rejected before, such as the above cat command if sub/
353 # been rejected before, such as the above cat command if sub/
353 # is a subrepository now, but was a normal directory before.
354 # is a subrepository now, but was a normal directory before.
354 # The old path auditor would have rejected by mistake since it
355 # The old path auditor would have rejected by mistake since it
355 # panics when it sees sub/.hg/.
356 # panics when it sees sub/.hg/.
356 #
357 #
357 # All in all, checking against the working copy seems sensible
358 # All in all, checking against the working copy seems sensible
358 # since we want to prevent access to nested repositories on
359 # since we want to prevent access to nested repositories on
359 # the filesystem *now*.
360 # the filesystem *now*.
360 ctx = self[None]
361 ctx = self[None]
361 parts = util.splitpath(subpath)
362 parts = util.splitpath(subpath)
362 while parts:
363 while parts:
363 prefix = '/'.join(parts)
364 prefix = '/'.join(parts)
364 if prefix in ctx.substate:
365 if prefix in ctx.substate:
365 if prefix == normsubpath:
366 if prefix == normsubpath:
366 return True
367 return True
367 else:
368 else:
368 sub = ctx.sub(prefix)
369 sub = ctx.sub(prefix)
369 return sub.checknested(subpath[len(prefix) + 1:])
370 return sub.checknested(subpath[len(prefix) + 1:])
370 else:
371 else:
371 parts.pop()
372 parts.pop()
372 return False
373 return False
373
374
    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name):
        """Return a filtered version of a repository"""
        # build a new class with the mixin and the current class
        # (possibly subclass of the repo)
        class proxycls(repoview.repoview, self.unfiltered().__class__):
            pass
        return proxycls(self, name)

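    # Illustrative sketch (not part of the original file): 'visible' is the
    # standard repoview filter name used elsewhere in this module, e.g.:
    #   visible = repo.filtered('visible')  # hides hidden/obsolete changesets
    #   unfiltered = visible.unfiltered()   # back to the raw repository
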
    @repofilecache('bookmarks')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @repofilecache('bookmarks.current')
    def _bookmarkcurrent(self):
        return bookmarks.readcurrent(self)

    def bookmarkheads(self, bookmark):
        name = bookmark.split('@', 1)[0]
        heads = []
        for mark, n in self._bookmarks.iteritems():
            if mark.split('@', 1)[0] == name:
                heads.append(n)
        return heads

    @storecache('phaseroots')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        # read default format for new obsstore.
        defaultformat = self.ui.configint('format', 'obsstore-version', None)
        # rely on obsstore class default when possible.
        kwargs = {}
        if defaultformat is not None:
            kwargs['defaultformat'] = defaultformat
        readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
        store = obsolete.obsstore(self.svfs, readonly=readonly,
                                  **kwargs)
        if store and readonly:
            # message is rare enough to not be translated
            msg = 'obsolete feature not enabled but %i markers found!\n'
            self.ui.warn(msg % len(list(store)))
        return store

    @storecache('00changelog.i')
    def changelog(self):
        c = changelog.changelog(self.svfs)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c

    @storecache('00manifest.i')
    def manifest(self):
        return manifest.manifest(self.svfs)

    @repofilecache('dirstate')
    def dirstate(self):
        warned = [0]
        def validate(node):
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.vfs, self.ui, self.root, validate)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, slice):
            return [context.changectx(self, i)
                    for i in xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

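    # Illustrative sketch (not part of the original file): __getitem__ and
    # __contains__ make the repo indexable and testable by changeid, e.g.:
    #   wctx = repo[None]      # workingctx for the working directory
    #   ctx = repo['tip']      # changectx for a rev, node, or tag
    #   ctxs = repo[0:5]       # slice: changectxs, filtered revs skipped
    #   if node in repo: ...   # True unless lookup raises RepoLookupError
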
    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Return a list of revisions matching the given revset'''
        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self)

    def set(self, expr, *args):
        '''
        Yield a context for each matching revision, after doing arg
        replacement via revset.formatspec
        '''
        for r in self.revs(expr, *args):
            yield self[r]

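    # Illustrative sketch (not part of the original file): revs() returns
    # revision numbers while set() yields changectx objects; both run their
    # arguments through revset.formatspec, e.g.:
    #   for rev in repo.revs('branch(%s) and not closed()', 'default'):
    #       ...
    #   for ctx in repo.set('heads(all())'):
    #       ...
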
    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

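    # Illustrative sketch (not part of the original file; the hook name below
    # is hypothetical): an extension that registered its own hook could
    # invoke it as:
    #   repo.hook('myext-preoperation', throw=True, node=hex(node))
    # With throw=True a failing hook aborts the caller; with throw=False
    # failures are reported via the return value instead of an exception.
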
    @unfilteredmethod
    def _tag(self, names, node, message, local, user, date, extra={},
             editor=False):
        if isinstance(names, str):
            names = (names,)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                if munge:
                    m = munge(name)
                else:
                    m = name

                if (self._tagscache.tagtypes and
                    name in self._tagscache.tagtypes):
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.vfs('localtags', 'r+')
            except IOError:
                fp = self.vfs('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError, e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        self.invalidatecaches()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m,
                              editor=editor)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date, editor=False):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            m = matchmod.exact(self.root, '', ['.hgtags'])
            if util.any(self.status(match=m, unknown=True, ignored=True)):
                raise util.Abort(_('working copy of .hgtags is changed'),
                                 hint=_('please commit .hgtags manually'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date, editor=editor)

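    # Illustrative sketch (not part of the original file): tagging the
    # working directory's parent with a single global tag could look like:
    #   node = repo['.'].node()
    #   repo.tag('v1.0', node, 'Added tag v1.0', local=False, user=None,
    #            date=None)
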
    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags-related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

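    # Illustrative sketch (not part of the original file): the rev branch
    # cache gives cheap rev -> branch lookups. Assuming the revbranchcache
    # object exposes branchinfo(rev) returning (branchname, closed), as
    # defined in branchmap.py, a caller might do:
    #   rbc = repo.revbranchcache()
    #   branch, closed = rbc.branchinfo(rev)
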
    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_("unknown branch '%s'") % branch)
            else:
                pass

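    # Illustrative sketch (not part of the original file): with
    # ignoremissing=True a missing branch yields None instead of an error:
    #   node = repo.branchtip('nosuchbranch', ignoremissing=True)
    #   if node is None:
    #       ...  # branch does not exist
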
    def lookup(self, key):
        return self[key].node()

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def known(self, nodes):
        nm = self.changelog.nodemap
        pc = self._phasecache
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or pc.phase(self, r) >= phases.secret)
            result.append(resp)
        return result

    def local(self):
        return self

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.ui.configbool('phases', 'publish', True):
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return 'store'
        return None

    def join(self, f, *insidef):
        return self.vfs.join(os.path.join(f, *insidef))

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.svfs, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def setparents(self, p1, p2=nullid):
        self.dirstate.beginparentchange()
        copies = self.dirstate.setparents(p1, p2)
        pctx = self[p1]
        if copies:
            # Adjust copy records, the dirstate cannot do it, it
            # requires access to parents manifests. Preserve them
            # only for entries added to first parent.
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)
        if p2 == nullid:
            for f, s in sorted(self.dirstate.copies().items()):
                if f not in pctx and s not in pctx:
                    self.dirstate.copy(None, f)
        self.dirstate.endparentchange()

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
           fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

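    # Illustrative sketch (not part of the original file): reading a file as
    # of a given changeset:
    #   fctx = repo.filectx('README', changeid='tip')
    #   data = fctx.data()
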
    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wvfs(f, mode)

    def _link(self, f):
        return self.wvfs.islink(f)

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data)
        if 'x' in flags:
            self.wvfs.setflags(filename, False, True)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

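    # Illustrative sketch (not part of the original file): the encode/decode
    # pipeline used by wread() and wwrite() is configured in hgrc, e.g. (from
    # the hgrc filter documentation):
    #   [encode]
    #   *.gz = pipe: gunzip
    #   [decode]
    #   *.gz = pipe: gzip
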
    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        self.hook('pretxnopen', throw=True, txnname=desc)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def validate(tr):
            """will run pre-closing hooks"""
            pending = lambda: tr.writepending() and self.root or ""
            reporef().hook('pretxnclose', throw=True, pending=pending,
                           txnname=desc)

        tr = transaction.transaction(rp, self.sopener, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate)
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As fncache is used for streaming
        # clone, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            def hook():
                reporef().hook('txnclose', throw=False, txnname=desc,
                               **tr2.hookargs)
            reporef()._afterlock(hook)
        tr.addfinalize('txnclose-hook', txnclosehook)
        self._transref = weakref.ref(tr)
        return tr

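    # Illustrative sketch (not part of the original file): the conventional
    # calling pattern for transaction(), mirroring its users elsewhere in
    # Mercurial ('my-operation' is a hypothetical description):
    #   tr = repo.transaction('my-operation')
    #   try:
    #       ...  # append to revlogs and other journaled files
    #       tr.close()    # commit the transaction
    #   finally:
    #       tr.release()  # abort if close() was never reached
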
    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    def _writejournal(self, desc):
        self.vfs.write("journal.dirstate",
                       self.vfs.tryread("dirstate"))
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

    def recover(self):
        lock = self.lock()
        try:
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self, dryrun=False, force=False):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks')
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots')
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            self.vfs.rename('undo.dirstate', 'dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This differs from dirstate.invalidate() in that it doesn't always
        reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self):
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extension should hook this to invalidate its caches
        self.invalidate()
        self.invalidatedirstate()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout", "600")),
                             releasefn, desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        if acquirefn:
            acquirefn()
        return l

    def _afterlock(self, callback):
        """add a callback to the current repository lock.

        The callback will be executed on lock release."""
        l = self._lockref and self._lockref()
        if l:
            l.postrelease.append(callback)
        else:
            callback()

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            for k, ce in self._filecache.items():
                if k == 'dirstate' or k not in self.__dict__:
                    continue
                ce.refresh()

        l = self._lock(self.svfs, "lock", wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write()

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

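    # Illustrative sketch (not part of the original file): callers that need
    # both locks take wlock before lock and release them in reverse order,
    # as rollback() above does:
    #   wlock = lock = None
    #   try:
    #       wlock = repo.wlock()
    #       lock = repo.lock()
    #       ...
    #   finally:
    #       release(lock, wlock)
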
1200 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1207 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1201 """
1208 """
1202 commit an individual file as part of a larger transaction
1209 commit an individual file as part of a larger transaction
1203 """
1210 """
1204
1211
1205 fname = fctx.path()
1212 fname = fctx.path()
1206 text = fctx.data()
1213 text = fctx.data()
1207 flog = self.file(fname)
1214 flog = self.file(fname)
1208 fparent1 = manifest1.get(fname, nullid)
1215 fparent1 = manifest1.get(fname, nullid)
1209 fparent2 = manifest2.get(fname, nullid)
1216 fparent2 = manifest2.get(fname, nullid)
1210
1217
1211 meta = {}
1218 meta = {}
1212 copy = fctx.renamed()
1219 copy = fctx.renamed()
1213 if copy and copy[0] != fname:
1220 if copy and copy[0] != fname:
1214 # Mark the new revision of this file as a copy of another
1221 # Mark the new revision of this file as a copy of another
1215 # file. This copy data will effectively act as a parent
1222 # file. This copy data will effectively act as a parent
1216 # of this new revision. If this is a merge, the first
1223 # of this new revision. If this is a merge, the first
1217 # parent will be the nullid (meaning "look up the copy data")
1224 # parent will be the nullid (meaning "look up the copy data")
1218 # and the second one will be the other parent. For example:
1225 # and the second one will be the other parent. For example:
1219 #
1226 #
1220 # 0 --- 1 --- 3 rev1 changes file foo
1227 # 0 --- 1 --- 3 rev1 changes file foo
1221 # \ / rev2 renames foo to bar and changes it
1228 # \ / rev2 renames foo to bar and changes it
1222 # \- 2 -/ rev3 should have bar with all changes and
1229 # \- 2 -/ rev3 should have bar with all changes and
1223 # should record that bar descends from
1230 # should record that bar descends from
1224 # bar in rev2 and foo in rev1
1231 # bar in rev2 and foo in rev1
1225 #
1232 #
1226 # this allows this merge to succeed:
1233 # this allows this merge to succeed:
1227 #
1234 #
1228 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1235 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1229 # \ / merging rev3 and rev4 should use bar@rev2
1236 # \ / merging rev3 and rev4 should use bar@rev2
1230 # \- 2 --- 4 as the merge base
1237 # \- 2 --- 4 as the merge base
1231 #
1238 #
1232
1239
1233 cfname = copy[0]
1240 cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense to
            # do (what does a copy from something not in your working copy even
            # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
            # the user that copy information was dropped, so if they didn't
            # expect this outcome it can be fixed, but this is the correct
            # behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

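    # Illustrative sketch (not part of the original file): when a copy source
    # is found above, the filelog revision for the new file records roughly
    # the following metadata ('foo' and the hash are hypothetical):
    #
    #     meta = {'copy': 'foo', 'copyrev': '1f0dee641bb7258c56bd60e93edfa2405381c41e'}
    #
    # and readers such as fctx.renamed() later recover the copy source from
    # these two keys.
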
    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in status.modified, status.added, status.removed:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise util.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    if wctx.sub(s).dirty(True):
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise util.Abort(
                                _("uncommitted changes in subrepo %s") % s,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    status.modified.insert(0, '.hgsubstate')

            elif '.hgsub' in status.removed:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in (status.modified + status.added +
                                          status.removed)):
                    status.removed.insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(status.modified + status.added + status.removed)

                for f in match.files():
                    f = self.dirstate.normalize(f)
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in status.deleted:
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            if (not force and not extra.get("close") and not merge
                and not cctx.files()
                and wctx.branch() == wctx.p1().branch()):
                return None

            if merge and cctx.deleted():
                raise util.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate(self)
            for f in status.modified:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_('unresolved merge conflicts '
                                       '(see "hg help resolve")'))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
        finally:
            wlock.release()

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (e.g. histedit):
            # the temporary commit may have been stripped before the hook runs
            if node in self:
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret

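    # A minimal caller-side sketch of commit(), assuming an existing
    # localrepository instance 'repo' (message and user are hypothetical):
    #
    #     node = repo.commit(text='fix parser bug', user='alice <a@example.com>')
    #     if node is None:
    #         repo.ui.status('nothing changed\n')
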
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest()
                m2 = p2.manifest()
                m = m1.copy()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = self.manifest.add(m, trp, linkrev,
                                       p1.manifestnode(), p2.manifestnode(),
                                       added, drop)
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: tr.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            # move the new commit into its proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter parent changesets:
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, tr, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

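    # A minimal sketch of the hook interaction above, assuming a hypothetical
    # hgrc entry: a 'pretxncommit' hook sees the pending changeset (its hash
    # is exported as HG_NODE) and can veto the whole transaction by exiting
    # non-zero:
    #
    #     [hooks]
    #     pretxncommit.lint = sh -c 'check-commit "$HG_NODE"'
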
    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read-only server processes
        # Thanks to branchcache collaboration this is done from the nearest
        # filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

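    # Illustrative sketch, reusing the matchmod import from this module
    # (the pattern and 'repo' are hypothetical):
    #
    #     m = matchmod.match(repo.root, '', ['glob:**.py'])
    #     for f in repo.walk(m):
    #         repo.ui.write(f + '\n')
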
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

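    # Illustrative sketch ('default' is just an example branch name): heads
    # come back newest first, so the first element is the tipmost head:
    #
    #     heads = repo.branchheads('default', closed=True)
    #     tipmost = heads and hex(heads[0]) or None
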
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

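    # A worked example of the sampling in between(): with i counting steps
    # back from 'top' and a node kept whenever i == f (f doubling each time),
    # nodes are recorded at distances 1, 2, 4, 8, ... from top. A span of
    # 100 changesets therefore yields about log2(100) ~ 7 exponentially
    # spaced samples instead of all 100, which is what the old discovery
    # protocol needs to bisect efficiently.
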
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the push
        command.
        """
        pass

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks object consisting of "(repo, remote, outgoing)"
        functions, which are called before pushing changesets.
        """
        return util.hooks()

    def stream_in(self, remote, requirements):
        lock = self.lock()
        try:
            # Save remote branchmap. We will use it later
            # to speed up branchcache creation
            rbranchmap = None
            if remote.capable("branchmap"):
                rbranchmap = remote.branchmap()

            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            handled_bytes = 0
            self.ui.progress(_('clone'), 0, total=total_bytes)
            start = time.time()

            tr = self.transaction(_('clone'))
            try:
                for i in xrange(total_files):
                    # XXX doesn't support '\n' or '\r' in filenames
                    l = fp.readline()
                    try:
                        name, size = l.split('\0', 1)
                        size = int(size)
                    except (ValueError, TypeError):
                        raise error.ResponseError(
                            _('unexpected response from remote server:'), l)
                    if self.ui.debugflag:
                        self.ui.debug('adding %s (%s)\n' %
                                      (name, util.bytecount(size)))
                    # for backwards compat, name was partially encoded
                    ofp = self.svfs(store.decodedir(name), 'w')
                    for chunk in util.filechunkiter(fp, limit=size):
                        handled_bytes += len(chunk)
                        self.ui.progress(_('clone'), handled_bytes,
                                         total=total_bytes)
                        ofp.write(chunk)
                    ofp.close()
                tr.close()
            finally:
                tr.release()

            # Writing straight to files circumvented the in-memory caches
            self.invalidate()

            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.progress(_('clone'), None)
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements +
            #                    new format-related requirements
            #                    from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            if rbranchmap:
                rbheads = []
                closed = []
                for bheads in rbranchmap.itervalues():
                    rbheads.extend(bheads)
                    for h in bheads:
                        r = self.changelog.rev(h)
                        b, c = self.changelog.branchinfo(r)
                        if c:
                            closed.append(h)

                if rbheads:
                    rtiprev = max((int(self.changelog.rev(node))
                                   for node in rbheads))
                    cache = branchmap.branchcache(rbranchmap,
                                                  self[rtiprev].node(),
                                                  rtiprev,
                                                  closednodes=closed)
                    # Try to stick the cache as low as possible: filters above
                    # 'served' are unlikely to be fetched from a clone
                    for candidate in ('base', 'immutable', 'served'):
                        rview = self.filtered(candidate)
                        if cache.validfor(rview):
                            self._branchcaches[candidate] = cache
                            cache.write(rview)
                            break
            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()

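    # A sketch of the stream_out wire format consumed above (all values
    # hypothetical; the real stream comes from the server):
    #
    #     0\n                       <- status code line (0 = ok)
    #     731 43166783\n            <- total file count, total byte count
    #     data/foo.i\x00125496\n    <- one 'name NUL size' header per file
    #     <125496 bytes of raw store data for that file>
    #     ...                       <- repeated for each remaining file
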
    def clone(self, remote, heads=[], stream=None):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream is None:
            # if the server explicitly prefers to stream (for fast LANs)
            stream = remote.capable('stream-preferred')

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                self.stream_in(remote, set(('revlogv1',)))
            else:
                # otherwise, 'streamreqs' contains the remote revlog format
                streamreqs = remote.capable('streamreqs')
                if streamreqs:
                    streamreqs = set(streamreqs.split(','))
                    # if we support it, stream in and adjust our requirements
                    if not streamreqs - self.supportedformats:
                        self.stream_in(remote, streamreqs)

        quiet = self.ui.backupconfig('ui', 'quietbookmarkmove')
        try:
            self.ui.setconfig('ui', 'quietbookmarkmove', True, 'clone')
            ret = exchange.pull(self, remote, heads).cgresult
        finally:
            self.ui.restoreconfig(quiet)
        return ret

    def pushkey(self, namespace, key, old, new):
        try:
            self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                      old=old, new=new)
        except error.HookAbort, exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                      ret=ret)
        self._afterlock(runhook)
        return ret

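    # Illustrative sketch: pushkey is the generic key/value channel used by
    # namespaces such as 'bookmarks' and 'phases', so moving a bookmark on
    # a server amounts to (values hypothetical):
    #
    #     repo.pushkey('bookmarks', 'stable', oldhexnode, newhexnode)
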
    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

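# A quick worked example of undoname(): only the first 'journal' in the
# basename is rewritten, so
#
#     undoname('.hg/store/journal')           -> '.hg/store/undo'
#     undoname('.hg/store/journal.bookmarks') -> '.hg/store/undo.bookmarks'
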
def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True
@@ -1,166 +1,167 b''
# statichttprepo.py - simple http repository class for mercurial
#
# This provides read-only repo access to repositories exported via static http
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from i18n import _
import changelog, byterange, url, error, namespaces
import localrepo, manifest, util, scmutil, store
import urllib, urllib2, errno, os

class httprangereader(object):
    def __init__(self, url, opener):
        # we assume opener has HTTPRangeHandler
        self.url = url
        self.pos = 0
        self.opener = opener
        self.name = url
    def seek(self, pos):
        self.pos = pos
    def read(self, bytes=None):
        req = urllib2.Request(self.url)
        end = ''
        if bytes:
            end = self.pos + bytes - 1
        if self.pos or end:
            req.add_header('Range', 'bytes=%d-%s' % (self.pos, end))

        try:
            f = self.opener.open(req)
            data = f.read()
            # Python 2.6+ defines a getcode() function, and 2.4 and
            # 2.5 appear to always have an undocumented code attribute
            # set. If we can't read either of those, fall back to 206
            # and hope for the best.
            code = getattr(f, 'getcode', lambda : getattr(f, 'code', 206))()
        except urllib2.HTTPError, inst:
            num = inst.code == 404 and errno.ENOENT or None
            raise IOError(num, inst)
        except urllib2.URLError, inst:
            raise IOError(None, inst.reason[1])

        if code == 200:
            # HTTPRangeHandler does nothing if remote does not support
            # Range headers and returns the full entity. Let's slice it.
            if bytes:
                data = data[self.pos:self.pos + bytes]
            else:
                data = data[self.pos:]
        elif bytes:
            data = data[:bytes]
        self.pos += len(data)
        return data
    def readlines(self):
        return self.read().splitlines(True)
    def __iter__(self):
        return iter(self.readlines())
    def close(self):
        pass

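# A quick worked example of the Range logic above: after seek(12), read(8)
# sends 'Range: bytes=12-19' and, when the server honors it, gets exactly
# those 8 bytes back. A 200 response instead of 206 means the server ignored
# the header and returned the full entity, so read() slices the body down to
# the same window before advancing self.pos.
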
def build_opener(ui, authinfo):
    # urllib cannot handle URLs with embedded user or passwd
    urlopener = url.opener(ui, authinfo)
    urlopener.add_handler(byterange.HTTPRangeHandler())

    class statichttpvfs(scmutil.abstractvfs):
        def __init__(self, base):
            self.base = base

        def __call__(self, path, mode='r', *args, **kw):
            if mode not in ('r', 'rb'):
                raise IOError('Permission denied')
            f = "/".join((self.base, urllib.quote(path)))
            return httprangereader(f, urlopener)

        def join(self, path):
            if path:
                return os.path.join(self.base, path)
            else:
                return self.base

    return statichttpvfs

class statichttppeer(localrepo.localpeer):
    def local(self):
        return None
    def canpush(self):
        return False

class statichttprepository(localrepo.localrepository):
    supported = localrepo.localrepository._basesupported

    def __init__(self, ui, path):
        self._url = path
        self.ui = ui

        self.root = path
        u = util.url(path.rstrip('/') + "/.hg")
        self.path, authinfo = u.authinfo()

        opener = build_opener(ui, authinfo)
        self.opener = opener(self.path)
        self.vfs = self.opener
        self._phasedefaults = []

        self.names = namespaces.namespaces()

        try:
            requirements = scmutil.readrequires(self.vfs, self.supported)
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise
            requirements = set()

        # check if it is a non-empty old-style repository
        try:
            fp = self.vfs("00changelog.i")
            fp.read(1)
            fp.close()
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise
            # we do not care about empty old-style repositories here
            msg = _("'%s' does not appear to be an hg repository") % path
            raise error.RepoError(msg)

        # setup store
        self.store = store.store(requirements, self.path, opener)
        self.spath = self.store.path
        self.svfs = self.store.opener
        self.sopener = self.svfs
        self.sjoin = self.store.join
        self._filecache = {}
        self.requirements = requirements

        self.manifest = manifest.manifest(self.svfs)
        self.changelog = changelog.changelog(self.svfs)
        self._tags = None
        self.nodetagscache = None
        self._branchcaches = {}
        self._revbranchcache = None
        self.encodepats = None
        self.decodepats = None

    def _restrictcapabilities(self, caps):
        caps = super(statichttprepository, self)._restrictcapabilities(caps)
        return caps.difference(["pushkey"])

    def url(self):
        return self._url

    def local(self):
        return False

    def peer(self):
        return statichttppeer(self)

    def lock(self, wait=True):
        raise util.Abort(_('cannot lock static-http repository'))

def instance(ui, path, create):
    if create:
        raise util.Abort(_('cannot create new static-http repository'))
    return statichttprepository(ui, path[7:])
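# A quick worked example of the path handling in instance(): Mercurial hands
# it the full URL, and path[7:] strips the 7-character 'static-' prefix, so
# (hypothetical URL)
#
#     instance(ui, 'static-http://example.com/repo', False)
#
# opens statichttprepository(ui, 'http://example.com/repo').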