revbranchcache: move out of branchmap onto localrepo...
Durham Goode
r24373:59cc0924 default
mercurial/branchmap.py
@@ -1,449 +1,448 @@
1 1 # branchmap.py - logic to compute, maintain and store branchmaps for local repos
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import bin, hex, nullid, nullrev
9 9 import encoding
10 10 import util
11 11 import time
12 12 from array import array
13 13 from struct import calcsize, pack, unpack
14 14
15 15 def _filename(repo):
16 16 """name of a branchcache file for a given repo or repoview"""
17 17 filename = "cache/branch2"
18 18 if repo.filtername:
19 19 filename = '%s-%s' % (filename, repo.filtername)
20 20 return filename
21 21
22 22 def read(repo):
23 23 try:
24 24 f = repo.vfs(_filename(repo))
25 25 lines = f.read().split('\n')
26 26 f.close()
27 27 except (IOError, OSError):
28 28 return None
29 29
30 30 try:
31 31 cachekey = lines.pop(0).split(" ", 2)
32 32 last, lrev = cachekey[:2]
33 33 last, lrev = bin(last), int(lrev)
34 34 filteredhash = None
35 35 if len(cachekey) > 2:
36 36 filteredhash = bin(cachekey[2])
37 37 partial = branchcache(tipnode=last, tiprev=lrev,
38 38 filteredhash=filteredhash)
39 39 if not partial.validfor(repo):
40 40 # invalidate the cache
41 41 raise ValueError('tip differs')
42 42 for l in lines:
43 43 if not l:
44 44 continue
45 45 node, state, label = l.split(" ", 2)
46 46 if state not in 'oc':
47 47 raise ValueError('invalid branch state')
48 48 label = encoding.tolocal(label.strip())
49 49 if not node in repo:
50 50 raise ValueError('node %s does not exist' % node)
51 51 node = bin(node)
52 52 partial.setdefault(label, []).append(node)
53 53 if state == 'c':
54 54 partial._closednodes.add(node)
55 55 except KeyboardInterrupt:
56 56 raise
57 57 except Exception, inst:
58 58 if repo.ui.debugflag:
59 59 msg = 'invalid branchheads cache'
60 60 if repo.filtername is not None:
61 61 msg += ' (%s)' % repo.filtername
62 62 msg += ': %s\n'
63 63 repo.ui.debug(msg % inst)
64 64 partial = None
65 65 return partial
66 66
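To make the branch2 layout concrete before the class docstring below spells it out, here is a minimal standalone sketch (hypothetical node and branch values; the hex-to-binary conversion and repo membership checks that read() performs are omitted) parsing a payload with the same splitting rules:

# first line: <tip hex node> <tip rev> [filtered hash]; then one head per line
payload = ('1111111111111111111111111111111111111111 42\n'
           '2222222222222222222222222222222222222222 o default\n'
           '3333333333333333333333333333333333333333 c stable\n')
lines = payload.split('\n')
cachekey = lines.pop(0).split(' ', 2)
tiphex, tiprev = cachekey[0], int(cachekey[1])
heads, closednodes = {}, set()
for l in lines:
    if not l:
        continue
    node, state, label = l.split(' ', 2)
    assert state in 'oc'                  # 'o'pen or 'c'losed head
    heads.setdefault(label, []).append(node)
    if state == 'c':
        closednodes.add(node)
assert tiprev == 42 and sorted(heads) == ['default', 'stable']
assert closednodes == set(['3' * 40])
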
67 67 ### Nearest subset relation
68 68 # Nearest subset of filter X is a filter Y so that:
69 69 # * Y is included in X,
70 70 # * X - Y is as small as possible.
71 71 # This creates an ordering used for branchmap purposes.
72 72 # The ordering may be partial.
73 73 subsettable = {None: 'visible',
74 74 'visible': 'served',
75 75 'served': 'immutable',
76 76 'immutable': 'base'}
77 77
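updatecache() below seeds an empty cache for a filtered view from its nearest subset, recursing one level at a time through subset.branchmap(). This sketch just flattens that recursion to show the consultation order (illustrative helper, not part of the module):

subsettable = {None: 'visible',
               'visible': 'served',
               'served': 'immutable',
               'immutable': 'base'}

def seedchain(filtername):
    """Filter names consulted when seeding, nearest subset first."""
    chain = []
    name = subsettable.get(filtername)
    while name is not None:
        chain.append(name)
        name = subsettable.get(name)
    return chain

assert seedchain(None) == ['visible', 'served', 'immutable', 'base']
assert seedchain('served') == ['immutable', 'base']
assert seedchain('base') == []          # no subset: start from scratch
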
78 78 def updatecache(repo):
79 79 cl = repo.changelog
80 80 filtername = repo.filtername
81 81 partial = repo._branchcaches.get(filtername)
82 82
83 83 revs = []
84 84 if partial is None or not partial.validfor(repo):
85 85 partial = read(repo)
86 86 if partial is None:
87 87 subsetname = subsettable.get(filtername)
88 88 if subsetname is None:
89 89 partial = branchcache()
90 90 else:
91 91 subset = repo.filtered(subsetname)
92 92 partial = subset.branchmap().copy()
93 93 extrarevs = subset.changelog.filteredrevs - cl.filteredrevs
94 94 revs.extend(r for r in extrarevs if r <= partial.tiprev)
95 95 revs.extend(cl.revs(start=partial.tiprev + 1))
96 96 if revs:
97 97 partial.update(repo, revs)
98 98 partial.write(repo)
99
100 if repo._revbranchcache is not None:
101 repo._revbranchcache.write(repo)
102
99 103 assert partial.validfor(repo), filtername
100 104 repo._branchcaches[repo.filtername] = partial
101 105
102 106 class branchcache(dict):
103 107 """A dict-like object that holds the branch heads cache.
104 108
105 109 This cache is used to avoid costly computations to determine all the
106 110 branch heads of a repo.
107 111
108 112 The cache is serialized on disk in the following format:
109 113
110 114 <tip hex node> <tip rev number> [optional filtered repo hex hash]
111 115 <branch head hex node> <open/closed state> <branch name>
112 116 <branch head hex node> <open/closed state> <branch name>
113 117 ...
114 118
115 119 The first line is used to check if the cache is still valid. If the
116 120 branch cache is for a filtered repo view, an optional third hash is
117 121 included that hashes the hashes of all filtered revisions.
118 122
119 123 The open/closed state is represented by a single letter 'o' or 'c'.
120 124 This field can be used to avoid changelog reads when determining if a
121 125 branch head closes a branch or not.
122 126 """
123 127
124 128 def __init__(self, entries=(), tipnode=nullid, tiprev=nullrev,
125 129 filteredhash=None, closednodes=None):
126 130 super(branchcache, self).__init__(entries)
127 131 self.tipnode = tipnode
128 132 self.tiprev = tiprev
129 133 self.filteredhash = filteredhash
130 134 # closednodes is a set of nodes that close their branch. If the branch
131 135 # cache has been updated, it may contain nodes that are no longer
132 136 # heads.
133 137 if closednodes is None:
134 138 self._closednodes = set()
135 139 else:
136 140 self._closednodes = closednodes
137 self._revbranchcache = None
138 141
139 142 def _hashfiltered(self, repo):
140 143 """build hash of the revisions filtered in the current cache
141 144
142 145 Tracking tipnode and tiprev is not enough to ensure validity of the
143 146 cache, as they do not distinguish caches that ignored various
144 147 revisions below tiprev.
145 148
146 149 To detect such difference, we build a cache of all ignored revisions.
147 150 """
148 151 cl = repo.changelog
149 152 if not cl.filteredrevs:
150 153 return None
151 154 key = None
152 155 revs = sorted(r for r in cl.filteredrevs if r <= self.tiprev)
153 156 if revs:
154 157 s = util.sha1()
155 158 for rev in revs:
156 159 s.update('%s;' % rev)
157 160 key = s.digest()
158 161 return key
159 162
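A standalone rendition of the same digest using hashlib directly (util.sha1 is effectively hashlib.sha1 in this era of the codebase; the revision sets below are hypothetical):

import hashlib

def filteredhash(filteredrevs, tiprev):
    revs = sorted(r for r in filteredrevs if r <= tiprev)
    if not revs:
        return None
    s = hashlib.sha1()
    for rev in revs:
        s.update(('%s;' % rev).encode('ascii'))
    return s.digest()

assert filteredhash(set([2, 5]), 6) != filteredhash(set([2]), 6)
assert filteredhash(set([9]), 6) is None  # nothing filtered at or below tiprev
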
160 163 def validfor(self, repo):
161 164 """Is the cache content valid regarding a repo
162 165
163 166 - False when cached tipnode is unknown or if we detect a strip.
164 167 - True when cache is up to date or a subset of current repo."""
165 168 try:
166 169 return ((self.tipnode == repo.changelog.node(self.tiprev))
167 170 and (self.filteredhash == self._hashfiltered(repo)))
168 171 except IndexError:
169 172 return False
170 173
171 174 def _branchtip(self, heads):
172 175 '''Return a tuple of the last open head in heads and False;
173 176 otherwise return the last closed head and True.'''
174 177 tip = heads[-1]
175 178 closed = True
176 179 for h in reversed(heads):
177 180 if h not in self._closednodes:
178 181 tip = h
179 182 closed = False
180 183 break
181 184 return tip, closed
182 185
183 186 def branchtip(self, branch):
184 187 '''Return the tipmost open head on branch, otherwise return the
185 188 tipmost closed head on the branch.
186 189 Raise KeyError for unknown branch.'''
187 190 return self._branchtip(self[branch])[0]
188 191
189 192 def branchheads(self, branch, closed=False):
190 193 heads = self[branch]
191 194 if not closed:
192 195 heads = [h for h in heads if h not in self._closednodes]
193 196 return heads
194 197
195 198 def iterbranches(self):
196 199 for bn, heads in self.iteritems():
197 200 yield (bn, heads) + self._branchtip(heads)
198 201
199 202 def copy(self):
200 203 """return a deep copy of the branchcache object"""
201 204 return branchcache(self, self.tipnode, self.tiprev, self.filteredhash,
202 205 self._closednodes)
203 206
204 207 def write(self, repo):
205 208 try:
206 209 f = repo.vfs(_filename(repo), "w", atomictemp=True)
207 210 cachekey = [hex(self.tipnode), str(self.tiprev)]
208 211 if self.filteredhash is not None:
209 212 cachekey.append(hex(self.filteredhash))
210 213 f.write(" ".join(cachekey) + '\n')
211 214 nodecount = 0
212 215 for label, nodes in sorted(self.iteritems()):
213 216 for node in nodes:
214 217 nodecount += 1
215 218 if node in self._closednodes:
216 219 state = 'c'
217 220 else:
218 221 state = 'o'
219 222 f.write("%s %s %s\n" % (hex(node), state,
220 223 encoding.fromlocal(label)))
221 224 f.close()
222 225 repo.ui.log('branchcache',
223 226 'wrote %s branch cache with %d labels and %d nodes\n',
224 227 repo.filtername, len(self), nodecount)
225 228 except (IOError, OSError, util.Abort), inst:
226 229 repo.ui.debug("couldn't write branch cache: %s\n" % inst)
227 230 # Abort may be raised by a read-only opener
228 231 pass
229 if self._revbranchcache:
230 self._revbranchcache.write(repo.unfiltered())
231 self._revbranchcache = None
232 232
233 233 def update(self, repo, revgen):
234 234 """Given a branchhead cache, self, that may have extra nodes or be
235 235 missing heads, and a generator of nodes that are strictly a superset of
236 236 the missing heads, this function updates self to be correct.
237 237 """
238 238 starttime = time.time()
239 239 cl = repo.changelog
240 240 # collect new branch entries
241 241 newbranches = {}
242 242 urepo = repo.unfiltered()
243 self._revbranchcache = revbranchcache(urepo)
244 getbranchinfo = self._revbranchcache.branchinfo
245 243 ucl = urepo.changelog
244 getbranchinfo = repo.revbranchcache().branchinfo
246 245 for r in revgen:
247 246 branch, closesbranch = getbranchinfo(ucl, r)
248 247 newbranches.setdefault(branch, []).append(r)
249 248 if closesbranch:
250 249 self._closednodes.add(cl.node(r))
251 250
252 251 # fetch current topological heads to speed up filtering
253 252 topoheads = set(cl.headrevs())
254 253
255 254 # if older branchheads are reachable from new ones, they aren't
256 255 # really branchheads. Note checking parents is insufficient:
257 256 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
258 257 for branch, newheadrevs in newbranches.iteritems():
259 258 bheads = self.setdefault(branch, [])
260 259 bheadset = set(cl.rev(node) for node in bheads)
261 260
262 261 # This has been tested True on all internal usage of this function.
263 262 # Run it again in case of doubt:
264 263 # assert not (set(bheadrevs) & set(newheadrevs))
265 264 newheadrevs.sort()
266 265 bheadset.update(newheadrevs)
267 266
268 267 # This prunes out two kinds of heads - heads that are superseded by
269 268 # a head in newheadrevs, and newheadrevs that are not heads because
270 269 # an existing head is their descendant.
271 270 uncertain = bheadset - topoheads
272 271 if uncertain:
273 272 floorrev = min(uncertain)
274 273 ancestors = set(cl.ancestors(newheadrevs, floorrev))
275 274 bheadset -= ancestors
276 275 bheadrevs = sorted(bheadset)
277 276 self[branch] = [cl.node(rev) for rev in bheadrevs]
278 277 tiprev = bheadrevs[-1]
279 278 if tiprev > self.tiprev:
280 279 self.tipnode = cl.node(tiprev)
281 280 self.tiprev = tiprev
282 281
283 282 if not self.validfor(repo):
284 283 # cache key is not valid anymore
285 284 self.tipnode = nullid
286 285 self.tiprev = nullrev
287 286 for heads in self.values():
288 287 tiprev = max(cl.rev(node) for node in heads)
289 288 if tiprev > self.tiprev:
290 289 self.tipnode = cl.node(tiprev)
291 290 self.tiprev = tiprev
292 291 self.filteredhash = self._hashfiltered(repo)
293 292
294 293 duration = time.time() - starttime
295 294 repo.ui.log('branchcache', 'updated %s branch cache in %.4f seconds\n',
296 295 repo.filtername, duration)
297 296
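The pruning step in update() above is the subtle part: old heads reachable from new ones must be dropped, and checking parents alone is insufficient. A self-contained sketch with a toy history matching the comment's example (rev 1 on branch a, rev 2 on branch b, rev 3 back on branch a; illustrative helper, not module code):

# toy changelog: rev -> parent revs
parents = {1: [], 2: [1], 3: [2]}

def ancestors(revs, floorrev):
    """Proper ancestors of revs with rev number >= floorrev."""
    seen, stack = set(), list(revs)
    while stack:
        for p in parents[stack.pop()]:
            if p >= floorrev and p not in seen:
                seen.add(p)
                stack.append(p)
    return seen

topoheads = set([3])                 # what cl.headrevs() would report
bheadset = set([1])                  # old cached head of branch 'a'
newheadrevs = [3]                    # rev 3 reopens branch 'a'
bheadset.update(newheadrevs)         # {1, 3}
uncertain = bheadset - topoheads     # {1}: not a topological head
if uncertain:
    bheadset -= ancestors(newheadrevs, min(uncertain))
assert sorted(bheadset) == [3]       # rev 1 was superseded
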
298 297 # Revision branch info cache
299 298
300 299 _rbcversion = '-v1'
301 300 _rbcnames = 'cache/rbc-names' + _rbcversion
302 301 _rbcrevs = 'cache/rbc-revs' + _rbcversion
303 302 # [4 byte hash prefix][4 byte branch name number with sign bit indicating open]
304 303 _rbcrecfmt = '>4sI'
305 304 _rbcrecsize = calcsize(_rbcrecfmt)
306 305 _rbcnodelen = 4
307 306 _rbcbranchidxmask = 0x7fffffff
308 307 _rbccloseflag = 0x80000000
309 308
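A quick standalone sketch of one rbc-revs record, packing and unpacking with the constants above (the node prefix and branch index are hypothetical):

from struct import calcsize, pack, unpack

_rbcrecfmt = '>4sI'
_rbcbranchidxmask = 0x7fffffff
_rbccloseflag = 0x80000000

assert calcsize(_rbcrecfmt) == 8     # 1/8th of a 64-byte changelog index entry

nodeprefix = b'\xde\xad\xbe\xef'     # first 4 bytes of the node hash
branchidx = 5                        # index into rbc-names
rec = pack(_rbcrecfmt, nodeprefix, branchidx | _rbccloseflag)  # closing commit

cachenode, field = unpack(_rbcrecfmt, rec)
close = bool(field & _rbccloseflag)
assert (cachenode, field & _rbcbranchidxmask, close) == (nodeprefix, 5, True)
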
310 309 class revbranchcache(object):
311 310 """Persistent cache, mapping from revision number to branch name and close flag.
312 311 This is a low level cache, independent of filtering.
313 312
314 313 Branch names are stored in rbc-names in internal encoding separated by 0.
315 314 rbc-names is append-only, and each branch name is only stored once and will
316 315 thus have a unique index.
317 316
318 317 The branch info for each revision is stored in rbc-revs as constant size
319 318 records. The whole file is read into memory, but it is only 'parsed' on
320 319 demand. The file is usually append-only but will be truncated if repo
321 320 modification is detected.
322 321 The record for each revision contains the first 4 bytes of the
323 322 corresponding node hash, and the record is only used if it still matches.
324 323 Even a completely trashed rbc-revs file will thus still give the right result
325 324 while converging towards full recovery ... assuming no incorrectly matching
326 325 node hashes.
327 326 The record also contains 4 bytes where 31 bits contain the index of the
328 327 branch and the last bit indicates a branch-closing commit.
329 328 The usage pattern for rbc-revs is thus somewhat similar to 00changelog.i
330 329 and will grow with it but be 1/8th of its size.
331 330 """
332 331
333 332 def __init__(self, repo, readonly=True):
334 333 assert repo.filtername is None
335 334 self._names = [] # branch names in local encoding with static index
336 335 self._rbcrevs = array('c') # structs of type _rbcrecfmt
337 336 self._rbcsnameslen = 0
338 337 try:
339 338 bndata = repo.vfs.read(_rbcnames)
340 339 self._rbcsnameslen = len(bndata) # for verification before writing
341 340 self._names = [encoding.tolocal(bn) for bn in bndata.split('\0')]
342 341 except (IOError, OSError), inst:
343 342 repo.ui.debug("couldn't read revision branch cache names: %s\n" %
344 343 inst)
345 344 if readonly:
346 345 # don't try to use cache - fall back to the slow path
347 346 self.branchinfo = self._branchinfo
348 347
349 348 if self._names:
350 349 try:
351 350 data = repo.vfs.read(_rbcrevs)
352 351 self._rbcrevs.fromstring(data)
353 352 except (IOError, OSError), inst:
354 353 repo.ui.debug("couldn't read revision branch cache: %s\n" %
355 354 inst)
356 355 # remember number of good records on disk
357 356 self._rbcrevslen = min(len(self._rbcrevs) // _rbcrecsize,
358 357 len(repo.changelog))
359 358 if self._rbcrevslen == 0:
360 359 self._names = []
361 360 self._rbcnamescount = len(self._names) # number of good names on disk
362 361 self._namesreverse = dict((b, r) for r, b in enumerate(self._names))
363 362
364 363 def branchinfo(self, changelog, rev):
365 364 """Return branch name and close flag for rev, using and updating
366 365 persistent cache."""
367 366 rbcrevidx = rev * _rbcrecsize
368 367
369 368 # if requested rev is missing, add and populate all missing revs
370 369 if len(self._rbcrevs) < rbcrevidx + _rbcrecsize:
371 370 first = len(self._rbcrevs) // _rbcrecsize
372 371 self._rbcrevs.extend('\0' * (len(changelog) * _rbcrecsize -
373 372 len(self._rbcrevs)))
374 373 for r in xrange(first, len(changelog)):
375 374 self._branchinfo(changelog, r)
376 375
377 376 # fast path: extract data from cache, use it if node is matching
378 377 reponode = changelog.node(rev)[:_rbcnodelen]
379 378 cachenode, branchidx = unpack(
380 379 _rbcrecfmt, buffer(self._rbcrevs, rbcrevidx, _rbcrecsize))
381 380 close = bool(branchidx & _rbccloseflag)
382 381 if close:
383 382 branchidx &= _rbcbranchidxmask
384 383 if cachenode == reponode:
385 384 return self._names[branchidx], close
386 385 # fall back to slow path and make sure it will be written to disk
387 386 self._rbcrevslen = min(self._rbcrevslen, rev)
388 387 return self._branchinfo(changelog, rev)
389 388
390 389 def _branchinfo(self, changelog, rev):
391 390 """Retrieve branch info from changelog and update _rbcrevs"""
392 391 b, close = changelog.branchinfo(rev)
393 392 if b in self._namesreverse:
394 393 branchidx = self._namesreverse[b]
395 394 else:
396 395 branchidx = len(self._names)
397 396 self._names.append(b)
398 397 self._namesreverse[b] = branchidx
399 398 reponode = changelog.node(rev)
400 399 if close:
401 400 branchidx |= _rbccloseflag
402 401 rbcrevidx = rev * _rbcrecsize
403 402 rec = array('c')
404 403 rec.fromstring(pack(_rbcrecfmt, reponode, branchidx))
405 404 self._rbcrevs[rbcrevidx:rbcrevidx + _rbcrecsize] = rec
406 405 return b, close
407 406
408 407 def write(self, repo):
409 408 """Save branch cache if it is dirty."""
410 409 if self._rbcnamescount < len(self._names):
411 410 try:
412 411 if self._rbcnamescount != 0:
413 412 f = repo.vfs.open(_rbcnames, 'ab')
414 413 if f.tell() == self._rbcsnameslen:
415 414 f.write('\0')
416 415 else:
417 416 f.close()
418 417 repo.ui.debug("%s changed - rewriting it\n" % _rbcnames)
419 418 self._rbcnamescount = 0
420 419 self._rbcrevslen = 0
421 420 if self._rbcnamescount == 0:
422 421 f = repo.vfs.open(_rbcnames, 'wb')
423 422 f.write('\0'.join(encoding.fromlocal(b)
424 423 for b in self._names[self._rbcnamescount:]))
425 424 self._rbcsnameslen = f.tell()
426 425 f.close()
427 426 except (IOError, OSError, util.Abort), inst:
428 427 repo.ui.debug("couldn't write revision branch cache names: "
429 428 "%s\n" % inst)
430 429 return
431 430 self._rbcnamescount = len(self._names)
432 431
433 432 start = self._rbcrevslen * _rbcrecsize
434 433 if start != len(self._rbcrevs):
435 434 revs = min(len(repo.changelog), len(self._rbcrevs) // _rbcrecsize)
436 435 try:
437 436 f = repo.vfs.open(_rbcrevs, 'ab')
438 437 if f.tell() != start:
439 438 repo.ui.debug("truncating %s to %s\n" % (_rbcrevs, start))
440 439 f.seek(start)
441 440 f.truncate()
442 441 end = revs * _rbcrecsize
443 442 f.write(self._rbcrevs[start:end])
444 443 f.close()
445 444 except (IOError, OSError, util.Abort), inst:
446 445 repo.ui.debug("couldn't write revision branch cache: %s\n" %
447 446 inst)
448 447 return
449 448 self._rbcrevslen = revs
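This is the core of the change: the revision branch cache is now owned by localrepo instead of being re-created inside branchcache.update() on every run. A hedged usage sketch of the new entry point (assuming a localrepository instance named repo; the accessor itself is in the localrepo.py hunk below):

def branchinfo(repo, rev):
    # revbranchcache() lazily creates one cache on the unfiltered repo and
    # reuses it; branchmap.updatecache() writes it back to disk when dirty.
    rbc = repo.revbranchcache()
    return rbc.branchinfo(repo.unfiltered().changelog, rev)
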
mercurial/localrepo.py
@@ -1,1887 +1,1894 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 from node import hex, nullid, short
8 8 from i18n import _
9 9 import urllib
10 10 import peer, changegroup, subrepo, pushkey, obsolete, repoview
11 11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
12 12 import lock as lockmod
13 13 import transaction, store, encoding, exchange, bundle2
14 14 import scmutil, util, extensions, hook, error, revset
15 15 import match as matchmod
16 16 import merge as mergemod
17 17 import tags as tagsmod
18 18 from lock import release
19 19 import weakref, errno, os, time, inspect
20 20 import branchmap, pathutil
21 21 import namespaces
22 22 propertycache = util.propertycache
23 23 filecache = scmutil.filecache
24 24
25 25 class repofilecache(filecache):
26 26 """All filecache usage on repo is done for logic that should be unfiltered
27 27 """
28 28
29 29 def __get__(self, repo, type=None):
30 30 return super(repofilecache, self).__get__(repo.unfiltered(), type)
31 31 def __set__(self, repo, value):
32 32 return super(repofilecache, self).__set__(repo.unfiltered(), value)
33 33 def __delete__(self, repo):
34 34 return super(repofilecache, self).__delete__(repo.unfiltered())
35 35
36 36 class storecache(repofilecache):
37 37 """filecache for files in the store"""
38 38 def join(self, obj, fname):
39 39 return obj.sjoin(fname)
40 40
41 41 class unfilteredpropertycache(propertycache):
42 42 """propertycache that applies to the unfiltered repo only"""
43 43
44 44 def __get__(self, repo, type=None):
45 45 unfi = repo.unfiltered()
46 46 if unfi is repo:
47 47 return super(unfilteredpropertycache, self).__get__(unfi)
48 48 return getattr(unfi, self.name)
49 49
50 50 class filteredpropertycache(propertycache):
51 51 """propertycache that must take filtering into account"""
52 52
53 53 def cachevalue(self, obj, value):
54 54 object.__setattr__(obj, self.name, value)
55 55
56 56
57 57 def hasunfilteredcache(repo, name):
58 58 """check if a repo has an unfilteredpropertycache value for <name>"""
59 59 return name in vars(repo.unfiltered())
60 60
61 61 def unfilteredmethod(orig):
62 62 """decorate a method that always needs to run on the unfiltered version"""
63 63 def wrapper(repo, *args, **kwargs):
64 64 return orig(repo.unfiltered(), *args, **kwargs)
65 65 return wrapper
66 66
67 67 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
68 68 'unbundle'))
69 69 legacycaps = moderncaps.union(set(['changegroupsubset']))
70 70
71 71 class localpeer(peer.peerrepository):
72 72 '''peer for a local repo; reflects only the most recent API'''
73 73
74 74 def __init__(self, repo, caps=moderncaps):
75 75 peer.peerrepository.__init__(self)
76 76 self._repo = repo.filtered('served')
77 77 self.ui = repo.ui
78 78 self._caps = repo._restrictcapabilities(caps)
79 79 self.requirements = repo.requirements
80 80 self.supportedformats = repo.supportedformats
81 81
82 82 def close(self):
83 83 self._repo.close()
84 84
85 85 def _capabilities(self):
86 86 return self._caps
87 87
88 88 def local(self):
89 89 return self._repo
90 90
91 91 def canpush(self):
92 92 return True
93 93
94 94 def url(self):
95 95 return self._repo.url()
96 96
97 97 def lookup(self, key):
98 98 return self._repo.lookup(key)
99 99
100 100 def branchmap(self):
101 101 return self._repo.branchmap()
102 102
103 103 def heads(self):
104 104 return self._repo.heads()
105 105
106 106 def known(self, nodes):
107 107 return self._repo.known(nodes)
108 108
109 109 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
110 110 format='HG10', **kwargs):
111 111 cg = exchange.getbundle(self._repo, source, heads=heads,
112 112 common=common, bundlecaps=bundlecaps, **kwargs)
113 113 if bundlecaps is not None and 'HG2Y' in bundlecaps:
114 114 # When requesting a bundle2, getbundle returns a stream to make the
115 115 # wire level function happier. We need to build a proper object
116 116 # from it in local peer.
117 117 cg = bundle2.unbundle20(self.ui, cg)
118 118 return cg
119 119
120 120 # TODO We might want to move the next two calls into legacypeer and add
121 121 # unbundle instead.
122 122
123 123 def unbundle(self, cg, heads, url):
124 124 """apply a bundle on a repo
125 125
126 126 This function handles the repo locking itself."""
127 127 try:
128 128 cg = exchange.readbundle(self.ui, cg, None)
129 129 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
130 130 if util.safehasattr(ret, 'getchunks'):
131 131 # This is a bundle20 object, turn it into an unbundler.
132 132 # This little dance should be dropped eventually when the API
133 133 # is finally improved.
134 134 stream = util.chunkbuffer(ret.getchunks())
135 135 ret = bundle2.unbundle20(self.ui, stream)
136 136 return ret
137 137 except error.PushRaced, exc:
138 138 raise error.ResponseError(_('push failed:'), str(exc))
139 139
140 140 def lock(self):
141 141 return self._repo.lock()
142 142
143 143 def addchangegroup(self, cg, source, url):
144 144 return changegroup.addchangegroup(self._repo, cg, source, url)
145 145
146 146 def pushkey(self, namespace, key, old, new):
147 147 return self._repo.pushkey(namespace, key, old, new)
148 148
149 149 def listkeys(self, namespace):
150 150 return self._repo.listkeys(namespace)
151 151
152 152 def debugwireargs(self, one, two, three=None, four=None, five=None):
153 153 '''used to test argument passing over the wire'''
154 154 return "%s %s %s %s %s" % (one, two, three, four, five)
155 155
156 156 class locallegacypeer(localpeer):
157 157 '''peer extension which implements legacy methods too; used for tests with
158 158 restricted capabilities'''
159 159
160 160 def __init__(self, repo):
161 161 localpeer.__init__(self, repo, caps=legacycaps)
162 162
163 163 def branches(self, nodes):
164 164 return self._repo.branches(nodes)
165 165
166 166 def between(self, pairs):
167 167 return self._repo.between(pairs)
168 168
169 169 def changegroup(self, basenodes, source):
170 170 return changegroup.changegroup(self._repo, basenodes, source)
171 171
172 172 def changegroupsubset(self, bases, heads, source):
173 173 return changegroup.changegroupsubset(self._repo, bases, heads, source)
174 174
175 175 class localrepository(object):
176 176
177 177 supportedformats = set(('revlogv1', 'generaldelta'))
178 178 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
179 179 'dotencode'))
180 180 openerreqs = set(('revlogv1', 'generaldelta'))
181 181 requirements = ['revlogv1']
182 182 filtername = None
183 183
184 184 # a list of (ui, featureset) functions.
185 185 # only functions defined in module of enabled extensions are invoked
186 186 featuresetupfuncs = set()
187 187
188 188 def _baserequirements(self, create):
189 189 return self.requirements[:]
190 190
191 191 def __init__(self, baseui, path=None, create=False):
192 192 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
193 193 self.wopener = self.wvfs
194 194 self.root = self.wvfs.base
195 195 self.path = self.wvfs.join(".hg")
196 196 self.origroot = path
197 197 self.auditor = pathutil.pathauditor(self.root, self._checknested)
198 198 self.vfs = scmutil.vfs(self.path)
199 199 self.opener = self.vfs
200 200 self.baseui = baseui
201 201 self.ui = baseui.copy()
202 202 self.ui.copy = baseui.copy # prevent copying repo configuration
203 203 # A list of callbacks to shape the phase if no data were found.
204 204 # Callbacks are in the form: func(repo, roots) --> processed root.
205 205 # This list is to be filled by extensions during repo setup.
206 206 self._phasedefaults = []
207 207 try:
208 208 self.ui.readconfig(self.join("hgrc"), self.root)
209 209 extensions.loadall(self.ui)
210 210 except IOError:
211 211 pass
212 212
213 213 if self.featuresetupfuncs:
214 214 self.supported = set(self._basesupported) # use private copy
215 215 extmods = set(m.__name__ for n, m
216 216 in extensions.extensions(self.ui))
217 217 for setupfunc in self.featuresetupfuncs:
218 218 if setupfunc.__module__ in extmods:
219 219 setupfunc(self.ui, self.supported)
220 220 else:
221 221 self.supported = self._basesupported
222 222
223 223 if not self.vfs.isdir():
224 224 if create:
225 225 if not self.wvfs.exists():
226 226 self.wvfs.makedirs()
227 227 self.vfs.makedir(notindexed=True)
228 228 requirements = self._baserequirements(create)
229 229 if self.ui.configbool('format', 'usestore', True):
230 230 self.vfs.mkdir("store")
231 231 requirements.append("store")
232 232 if self.ui.configbool('format', 'usefncache', True):
233 233 requirements.append("fncache")
234 234 if self.ui.configbool('format', 'dotencode', True):
235 235 requirements.append('dotencode')
236 236 # create an invalid changelog
237 237 self.vfs.append(
238 238 "00changelog.i",
239 239 '\0\0\0\2' # represents revlogv2
240 240 ' dummy changelog to prevent using the old repo layout'
241 241 )
242 242 if self.ui.configbool('format', 'generaldelta', False):
243 243 requirements.append("generaldelta")
244 244 requirements = set(requirements)
245 245 else:
246 246 raise error.RepoError(_("repository %s not found") % path)
247 247 elif create:
248 248 raise error.RepoError(_("repository %s already exists") % path)
249 249 else:
250 250 try:
251 251 requirements = scmutil.readrequires(self.vfs, self.supported)
252 252 except IOError, inst:
253 253 if inst.errno != errno.ENOENT:
254 254 raise
255 255 requirements = set()
256 256
257 257 self.sharedpath = self.path
258 258 try:
259 259 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
260 260 realpath=True)
261 261 s = vfs.base
262 262 if not vfs.exists():
263 263 raise error.RepoError(
264 264 _('.hg/sharedpath points to nonexistent directory %s') % s)
265 265 self.sharedpath = s
266 266 except IOError, inst:
267 267 if inst.errno != errno.ENOENT:
268 268 raise
269 269
270 270 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
271 271 self.spath = self.store.path
272 272 self.svfs = self.store.vfs
273 273 self.sopener = self.svfs
274 274 self.sjoin = self.store.join
275 275 self.vfs.createmode = self.store.createmode
276 276 self._applyrequirements(requirements)
277 277 if create:
278 278 self._writerequirements()
279 279
280 280
281 281 self._branchcaches = {}
282 self._revbranchcache = None
282 283 self.filterpats = {}
283 284 self._datafilters = {}
284 285 self._transref = self._lockref = self._wlockref = None
285 286
286 287 # A cache for various files under .hg/ that tracks file changes,
287 288 # (used by the filecache decorator)
288 289 #
289 290 # Maps a property name to its util.filecacheentry
290 291 self._filecache = {}
291 292
292 293 # hold sets of revision to be filtered
293 294 # should be cleared when something might have changed the filter value:
294 295 # - new changesets,
295 296 # - phase change,
296 297 # - new obsolescence marker,
297 298 # - working directory parent change,
298 299 # - bookmark changes
299 300 self.filteredrevcache = {}
300 301
301 302 # generic mapping between names and nodes
302 303 self.names = namespaces.namespaces()
303 304
304 305 def close(self):
305 306 pass
306 307
307 308 def _restrictcapabilities(self, caps):
308 309 # bundle2 is not ready for prime time, drop it unless explicitly
309 310 # required by the tests (or some brave tester)
310 311 if self.ui.configbool('experimental', 'bundle2-exp', False):
311 312 caps = set(caps)
312 313 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
313 314 caps.add('bundle2-exp=' + urllib.quote(capsblob))
314 315 return caps
315 316
316 317 def _applyrequirements(self, requirements):
317 318 self.requirements = requirements
318 319 self.svfs.options = dict((r, 1) for r in requirements
319 320 if r in self.openerreqs)
320 321 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
321 322 if chunkcachesize is not None:
322 323 self.svfs.options['chunkcachesize'] = chunkcachesize
323 324 maxchainlen = self.ui.configint('format', 'maxchainlen')
324 325 if maxchainlen is not None:
325 326 self.svfs.options['maxchainlen'] = maxchainlen
326 327 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
327 328 if manifestcachesize is not None:
328 329 self.svfs.options['manifestcachesize'] = manifestcachesize
329 330
330 331 def _writerequirements(self):
331 332 reqfile = self.vfs("requires", "w")
332 333 for r in sorted(self.requirements):
333 334 reqfile.write("%s\n" % r)
334 335 reqfile.close()
335 336
336 337 def _checknested(self, path):
337 338 """Determine if path is a legal nested repository."""
338 339 if not path.startswith(self.root):
339 340 return False
340 341 subpath = path[len(self.root) + 1:]
341 342 normsubpath = util.pconvert(subpath)
342 343
343 344 # XXX: Checking against the current working copy is wrong in
344 345 # the sense that it can reject things like
345 346 #
346 347 # $ hg cat -r 10 sub/x.txt
347 348 #
348 349 # if sub/ is no longer a subrepository in the working copy
349 350 # parent revision.
350 351 #
351 352 # However, it can of course also allow things that would have
352 353 # been rejected before, such as the above cat command if sub/
353 354 # is a subrepository now, but was a normal directory before.
354 355 # The old path auditor would have rejected by mistake since it
355 356 # panics when it sees sub/.hg/.
356 357 #
357 358 # All in all, checking against the working copy seems sensible
358 359 # since we want to prevent access to nested repositories on
359 360 # the filesystem *now*.
360 361 ctx = self[None]
361 362 parts = util.splitpath(subpath)
362 363 while parts:
363 364 prefix = '/'.join(parts)
364 365 if prefix in ctx.substate:
365 366 if prefix == normsubpath:
366 367 return True
367 368 else:
368 369 sub = ctx.sub(prefix)
369 370 return sub.checknested(subpath[len(prefix) + 1:])
370 371 else:
371 372 parts.pop()
372 373 return False
373 374
374 375 def peer(self):
375 376 return localpeer(self) # not cached to avoid reference cycle
376 377
377 378 def unfiltered(self):
378 379 """Return unfiltered version of the repository
379 380
380 381 Intended to be overwritten by filtered repo."""
381 382 return self
382 383
383 384 def filtered(self, name):
384 385 """Return a filtered version of a repository"""
385 386 # build a new class with the mixin and the current class
386 387 # (possibly subclass of the repo)
387 388 class proxycls(repoview.repoview, self.unfiltered().__class__):
388 389 pass
389 390 return proxycls(self, name)
390 391
391 392 @repofilecache('bookmarks')
392 393 def _bookmarks(self):
393 394 return bookmarks.bmstore(self)
394 395
395 396 @repofilecache('bookmarks.current')
396 397 def _bookmarkcurrent(self):
397 398 return bookmarks.readcurrent(self)
398 399
399 400 def bookmarkheads(self, bookmark):
400 401 name = bookmark.split('@', 1)[0]
401 402 heads = []
402 403 for mark, n in self._bookmarks.iteritems():
403 404 if mark.split('@', 1)[0] == name:
404 405 heads.append(n)
405 406 return heads
406 407
407 408 @storecache('phaseroots')
408 409 def _phasecache(self):
409 410 return phases.phasecache(self, self._phasedefaults)
410 411
411 412 @storecache('obsstore')
412 413 def obsstore(self):
413 414 # read default format for new obsstore.
414 415 defaultformat = self.ui.configint('format', 'obsstore-version', None)
415 416 # rely on obsstore class default when possible.
416 417 kwargs = {}
417 418 if defaultformat is not None:
418 419 kwargs['defaultformat'] = defaultformat
419 420 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
420 421 store = obsolete.obsstore(self.svfs, readonly=readonly,
421 422 **kwargs)
422 423 if store and readonly:
423 424 # message is rare enough to not be translated
424 425 msg = 'obsolete feature not enabled but %i markers found!\n'
425 426 self.ui.warn(msg % len(list(store)))
426 427 return store
427 428
428 429 @storecache('00changelog.i')
429 430 def changelog(self):
430 431 c = changelog.changelog(self.svfs)
431 432 if 'HG_PENDING' in os.environ:
432 433 p = os.environ['HG_PENDING']
433 434 if p.startswith(self.root):
434 435 c.readpending('00changelog.i.a')
435 436 return c
436 437
437 438 @storecache('00manifest.i')
438 439 def manifest(self):
439 440 return manifest.manifest(self.svfs)
440 441
441 442 @repofilecache('dirstate')
442 443 def dirstate(self):
443 444 warned = [0]
444 445 def validate(node):
445 446 try:
446 447 self.changelog.rev(node)
447 448 return node
448 449 except error.LookupError:
449 450 if not warned[0]:
450 451 warned[0] = True
451 452 self.ui.warn(_("warning: ignoring unknown"
452 453 " working parent %s!\n") % short(node))
453 454 return nullid
454 455
455 456 return dirstate.dirstate(self.vfs, self.ui, self.root, validate)
456 457
457 458 def __getitem__(self, changeid):
458 459 if changeid is None:
459 460 return context.workingctx(self)
460 461 if isinstance(changeid, slice):
461 462 return [context.changectx(self, i)
462 463 for i in xrange(*changeid.indices(len(self)))
463 464 if i not in self.changelog.filteredrevs]
464 465 return context.changectx(self, changeid)
465 466
466 467 def __contains__(self, changeid):
467 468 try:
468 469 self[changeid]
469 470 return True
470 471 except error.RepoLookupError:
471 472 return False
472 473
473 474 def __nonzero__(self):
474 475 return True
475 476
476 477 def __len__(self):
477 478 return len(self.changelog)
478 479
479 480 def __iter__(self):
480 481 return iter(self.changelog)
481 482
482 483 def revs(self, expr, *args):
483 484 '''Return a list of revisions matching the given revset'''
484 485 expr = revset.formatspec(expr, *args)
485 486 m = revset.match(None, expr)
486 487 return m(self)
487 488
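For illustration, revs() composes with revset argument formatting via revset.formatspec; head(), branch() and closed() are standard revset predicates, and %s quotes the string safely for the parser (sketch, assuming a localrepository instance repo):

def openheads(repo, branchname):
    # heads of the named branch, excluding branch-closing heads
    return repo.revs('head() and branch(%s) and not closed()', branchname)
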
488 489 def set(self, expr, *args):
489 490 '''
490 491 Yield a context for each matching revision, after doing arg
491 492 replacement via revset.formatspec
492 493 '''
493 494 for r in self.revs(expr, *args):
494 495 yield self[r]
495 496
496 497 def url(self):
497 498 return 'file:' + self.root
498 499
499 500 def hook(self, name, throw=False, **args):
500 501 """Call a hook, passing this repo instance.
501 502
502 503 This is a convenience method to aid invoking hooks. Extensions likely
503 504 won't call this unless they have registered a custom hook or are
504 505 replacing code that is expected to call a hook.
505 506 """
506 507 return hook.hook(self.ui, self, name, throw, **args)
507 508
508 509 @unfilteredmethod
509 510 def _tag(self, names, node, message, local, user, date, extra={},
510 511 editor=False):
511 512 if isinstance(names, str):
512 513 names = (names,)
513 514
514 515 branches = self.branchmap()
515 516 for name in names:
516 517 self.hook('pretag', throw=True, node=hex(node), tag=name,
517 518 local=local)
518 519 if name in branches:
519 520 self.ui.warn(_("warning: tag %s conflicts with existing"
520 521 " branch name\n") % name)
521 522
522 523 def writetags(fp, names, munge, prevtags):
523 524 fp.seek(0, 2)
524 525 if prevtags and prevtags[-1] != '\n':
525 526 fp.write('\n')
526 527 for name in names:
527 528 if munge:
528 529 m = munge(name)
529 530 else:
530 531 m = name
531 532
532 533 if (self._tagscache.tagtypes and
533 534 name in self._tagscache.tagtypes):
534 535 old = self.tags().get(name, nullid)
535 536 fp.write('%s %s\n' % (hex(old), m))
536 537 fp.write('%s %s\n' % (hex(node), m))
537 538 fp.close()
538 539
539 540 prevtags = ''
540 541 if local:
541 542 try:
542 543 fp = self.vfs('localtags', 'r+')
543 544 except IOError:
544 545 fp = self.vfs('localtags', 'a')
545 546 else:
546 547 prevtags = fp.read()
547 548
548 549 # local tags are stored in the current charset
549 550 writetags(fp, names, None, prevtags)
550 551 for name in names:
551 552 self.hook('tag', node=hex(node), tag=name, local=local)
552 553 return
553 554
554 555 try:
555 556 fp = self.wfile('.hgtags', 'rb+')
556 557 except IOError, e:
557 558 if e.errno != errno.ENOENT:
558 559 raise
559 560 fp = self.wfile('.hgtags', 'ab')
560 561 else:
561 562 prevtags = fp.read()
562 563
563 564 # committed tags are stored in UTF-8
564 565 writetags(fp, names, encoding.fromlocal, prevtags)
565 566
566 567 fp.close()
567 568
568 569 self.invalidatecaches()
569 570
570 571 if '.hgtags' not in self.dirstate:
571 572 self[None].add(['.hgtags'])
572 573
573 574 m = matchmod.exact(self.root, '', ['.hgtags'])
574 575 tagnode = self.commit(message, user, date, extra=extra, match=m,
575 576 editor=editor)
576 577
577 578 for name in names:
578 579 self.hook('tag', node=hex(node), tag=name, local=local)
579 580
580 581 return tagnode
581 582
582 583 def tag(self, names, node, message, local, user, date, editor=False):
583 584 '''tag a revision with one or more symbolic names.
584 585
585 586 names is a list of strings or, when adding a single tag, names may be a
586 587 string.
587 588
588 589 if local is True, the tags are stored in a per-repository file.
589 590 otherwise, they are stored in the .hgtags file, and a new
590 591 changeset is committed with the change.
591 592
592 593 keyword arguments:
593 594
594 595 local: whether to store tags in non-version-controlled file
595 596 (default False)
596 597
597 598 message: commit message to use if committing
598 599
599 600 user: name of user to use if committing
600 601
601 602 date: date tuple to use if committing'''
602 603
603 604 if not local:
604 605 m = matchmod.exact(self.root, '', ['.hgtags'])
605 606 if util.any(self.status(match=m, unknown=True, ignored=True)):
606 607 raise util.Abort(_('working copy of .hgtags is changed'),
607 608 hint=_('please commit .hgtags manually'))
608 609
609 610 self.tags() # instantiate the cache
610 611 self._tag(names, node, message, local, user, date, editor=editor)
611 612
612 613 @filteredpropertycache
613 614 def _tagscache(self):
614 615 '''Returns a tagscache object that contains various tags related
615 616 caches.'''
616 617
617 618 # This simplifies its cache management by having one decorated
618 619 # function (this one) and the rest simply fetch things from it.
619 620 class tagscache(object):
620 621 def __init__(self):
621 622 # These two define the set of tags for this repository. tags
622 623 # maps tag name to node; tagtypes maps tag name to 'global' or
623 624 # 'local'. (Global tags are defined by .hgtags across all
624 625 # heads, and local tags are defined in .hg/localtags.)
625 626 # They constitute the in-memory cache of tags.
626 627 self.tags = self.tagtypes = None
627 628
628 629 self.nodetagscache = self.tagslist = None
629 630
630 631 cache = tagscache()
631 632 cache.tags, cache.tagtypes = self._findtags()
632 633
633 634 return cache
634 635
635 636 def tags(self):
636 637 '''return a mapping of tag to node'''
637 638 t = {}
638 639 if self.changelog.filteredrevs:
639 640 tags, tt = self._findtags()
640 641 else:
641 642 tags = self._tagscache.tags
642 643 for k, v in tags.iteritems():
643 644 try:
644 645 # ignore tags to unknown nodes
645 646 self.changelog.rev(v)
646 647 t[k] = v
647 648 except (error.LookupError, ValueError):
648 649 pass
649 650 return t
650 651
651 652 def _findtags(self):
652 653 '''Do the hard work of finding tags. Return a pair of dicts
653 654 (tags, tagtypes) where tags maps tag name to node, and tagtypes
654 655 maps tag name to a string like \'global\' or \'local\'.
655 656 Subclasses or extensions are free to add their own tags, but
656 657 should be aware that the returned dicts will be retained for the
657 658 duration of the localrepo object.'''
658 659
659 660 # XXX what tagtype should subclasses/extensions use? Currently
660 661 # mq and bookmarks add tags, but do not set the tagtype at all.
661 662 # Should each extension invent its own tag type? Should there
662 663 # be one tagtype for all such "virtual" tags? Or is the status
663 664 # quo fine?
664 665
665 666 alltags = {} # map tag name to (node, hist)
666 667 tagtypes = {}
667 668
668 669 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
669 670 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
670 671
671 672 # Build the return dicts. Have to re-encode tag names because
672 673 # the tags module always uses UTF-8 (in order not to lose info
673 674 # writing to the cache), but the rest of Mercurial wants them in
674 675 # local encoding.
675 676 tags = {}
676 677 for (name, (node, hist)) in alltags.iteritems():
677 678 if node != nullid:
678 679 tags[encoding.tolocal(name)] = node
679 680 tags['tip'] = self.changelog.tip()
680 681 tagtypes = dict([(encoding.tolocal(name), value)
681 682 for (name, value) in tagtypes.iteritems()])
682 683 return (tags, tagtypes)
683 684
684 685 def tagtype(self, tagname):
685 686 '''
686 687 return the type of the given tag. result can be:
687 688
688 689 'local' : a local tag
689 690 'global' : a global tag
690 691 None : tag does not exist
691 692 '''
692 693
693 694 return self._tagscache.tagtypes.get(tagname)
694 695
695 696 def tagslist(self):
696 697 '''return a list of tags ordered by revision'''
697 698 if not self._tagscache.tagslist:
698 699 l = []
699 700 for t, n in self.tags().iteritems():
700 701 l.append((self.changelog.rev(n), t, n))
701 702 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
702 703
703 704 return self._tagscache.tagslist
704 705
705 706 def nodetags(self, node):
706 707 '''return the tags associated with a node'''
707 708 if not self._tagscache.nodetagscache:
708 709 nodetagscache = {}
709 710 for t, n in self._tagscache.tags.iteritems():
710 711 nodetagscache.setdefault(n, []).append(t)
711 712 for tags in nodetagscache.itervalues():
712 713 tags.sort()
713 714 self._tagscache.nodetagscache = nodetagscache
714 715 return self._tagscache.nodetagscache.get(node, [])
715 716
716 717 def nodebookmarks(self, node):
717 718 marks = []
718 719 for bookmark, n in self._bookmarks.iteritems():
719 720 if n == node:
720 721 marks.append(bookmark)
721 722 return sorted(marks)
722 723
723 724 def branchmap(self):
724 725 '''returns a dictionary {branch: [branchheads]} with branchheads
725 726 ordered by increasing revision number'''
726 727 branchmap.updatecache(self)
727 728 return self._branchcaches[self.filtername]
728 729
730 @unfilteredmethod
731 def revbranchcache(self):
732 if not self._revbranchcache:
733 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
734 return self._revbranchcache
735
729 736 def branchtip(self, branch, ignoremissing=False):
730 737 '''return the tip node for a given branch
731 738
732 739 If ignoremissing is True, then this method will not raise an error.
733 740 This is helpful for callers that only expect None for a missing branch
734 741 (e.g. namespace).
735 742
736 743 '''
737 744 try:
738 745 return self.branchmap().branchtip(branch)
739 746 except KeyError:
740 747 if not ignoremissing:
741 748 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
742 749 else:
743 750 pass
744 751
745 752 def lookup(self, key):
746 753 return self[key].node()
747 754
748 755 def lookupbranch(self, key, remote=None):
749 756 repo = remote or self
750 757 if key in repo.branchmap():
751 758 return key
752 759
753 760 repo = (remote and remote.local()) and remote or self
754 761 return repo[key].branch()
755 762
756 763 def known(self, nodes):
757 764 nm = self.changelog.nodemap
758 765 pc = self._phasecache
759 766 result = []
760 767 for n in nodes:
761 768 r = nm.get(n)
762 769 resp = not (r is None or pc.phase(self, r) >= phases.secret)
763 770 result.append(resp)
764 771 return result
765 772
766 773 def local(self):
767 774 return self
768 775
769 776 def cancopy(self):
770 777 # so statichttprepo's override of local() works
771 778 if not self.local():
772 779 return False
773 780 if not self.ui.configbool('phases', 'publish', True):
774 781 return True
775 782 # if publishing we can't copy if there is filtered content
776 783 return not self.filtered('visible').changelog.filteredrevs
777 784
778 785 def shared(self):
779 786 '''the type of shared repository (None if not shared)'''
780 787 if self.sharedpath != self.path:
781 788 return 'store'
782 789 return None
783 790
784 791 def join(self, f, *insidef):
785 792 return self.vfs.join(os.path.join(f, *insidef))
786 793
787 794 def wjoin(self, f, *insidef):
788 795 return self.vfs.reljoin(self.root, f, *insidef)
789 796
790 797 def file(self, f):
791 798 if f[0] == '/':
792 799 f = f[1:]
793 800 return filelog.filelog(self.svfs, f)
794 801
795 802 def changectx(self, changeid):
796 803 return self[changeid]
797 804
798 805 def parents(self, changeid=None):
799 806 '''get list of changectxs for parents of changeid'''
800 807 return self[changeid].parents()
801 808
802 809 def setparents(self, p1, p2=nullid):
803 810 self.dirstate.beginparentchange()
804 811 copies = self.dirstate.setparents(p1, p2)
805 812 pctx = self[p1]
806 813 if copies:
807 814 # Adjust copy records, the dirstate cannot do it, it
808 815 # requires access to parents manifests. Preserve them
809 816 # only for entries added to first parent.
810 817 for f in copies:
811 818 if f not in pctx and copies[f] in pctx:
812 819 self.dirstate.copy(copies[f], f)
813 820 if p2 == nullid:
814 821 for f, s in sorted(self.dirstate.copies().items()):
815 822 if f not in pctx and s not in pctx:
816 823 self.dirstate.copy(None, f)
817 824 self.dirstate.endparentchange()
818 825
819 826 def filectx(self, path, changeid=None, fileid=None):
820 827 """changeid can be a changeset revision, node, or tag.
821 828 fileid can be a file revision or node."""
822 829 return context.filectx(self, path, changeid, fileid)
823 830
824 831 def getcwd(self):
825 832 return self.dirstate.getcwd()
826 833
827 834 def pathto(self, f, cwd=None):
828 835 return self.dirstate.pathto(f, cwd)
829 836
830 837 def wfile(self, f, mode='r'):
831 838 return self.wvfs(f, mode)
832 839
833 840 def _link(self, f):
834 841 return self.wvfs.islink(f)
835 842
836 843 def _loadfilter(self, filter):
837 844 if filter not in self.filterpats:
838 845 l = []
839 846 for pat, cmd in self.ui.configitems(filter):
840 847 if cmd == '!':
841 848 continue
842 849 mf = matchmod.match(self.root, '', [pat])
843 850 fn = None
844 851 params = cmd
845 852 for name, filterfn in self._datafilters.iteritems():
846 853 if cmd.startswith(name):
847 854 fn = filterfn
848 855 params = cmd[len(name):].lstrip()
849 856 break
850 857 if not fn:
851 858 fn = lambda s, c, **kwargs: util.filter(s, c)
852 859 # Wrap old filters not supporting keyword arguments
853 860 if not inspect.getargspec(fn)[2]:
854 861 oldfn = fn
855 862 fn = lambda s, c, **kwargs: oldfn(s, c)
856 863 l.append((mf, fn, params))
857 864 self.filterpats[filter] = l
858 865 return self.filterpats[filter]
859 866
860 867 def _filter(self, filterpats, filename, data):
861 868 for mf, fn, cmd in filterpats:
862 869 if mf(filename):
863 870 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
864 871 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
865 872 break
866 873
867 874 return data
868 875
869 876 @unfilteredpropertycache
870 877 def _encodefilterpats(self):
871 878 return self._loadfilter('encode')
872 879
873 880 @unfilteredpropertycache
874 881 def _decodefilterpats(self):
875 882 return self._loadfilter('decode')
876 883
877 884 def adddatafilter(self, name, filter):
878 885 self._datafilters[name] = filter
879 886
880 887 def wread(self, filename):
881 888 if self._link(filename):
882 889 data = self.wvfs.readlink(filename)
883 890 else:
884 891 data = self.wvfs.read(filename)
885 892 return self._filter(self._encodefilterpats, filename, data)
886 893
887 894 def wwrite(self, filename, data, flags):
888 895 data = self._filter(self._decodefilterpats, filename, data)
889 896 if 'l' in flags:
890 897 self.wvfs.symlink(data, filename)
891 898 else:
892 899 self.wvfs.write(filename, data)
893 900 if 'x' in flags:
894 901 self.wvfs.setflags(filename, False, True)
895 902
896 903 def wwritedata(self, filename, data):
897 904 return self._filter(self._decodefilterpats, filename, data)
898 905
899 906 def currenttransaction(self):
900 907 """return the current transaction or None if none exists"""
901 908 if self._transref:
902 909 tr = self._transref()
903 910 else:
904 911 tr = None
905 912
906 913 if tr and tr.running():
907 914 return tr
908 915 return None
909 916
910 917 def transaction(self, desc, report=None):
911 918 tr = self.currenttransaction()
912 919 if tr is not None:
913 920 return tr.nest()
914 921
915 922 # abort here if the journal already exists
916 923 if self.svfs.exists("journal"):
917 924 raise error.RepoError(
918 925 _("abandoned transaction found"),
919 926 hint=_("run 'hg recover' to clean up transaction"))
920 927
921 928 self.hook('pretxnopen', throw=True, txnname=desc)
922 929
923 930 self._writejournal(desc)
924 931 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
925 932 if report:
926 933 rp = report
927 934 else:
928 935 rp = self.ui.warn
929 936 vfsmap = {'plain': self.vfs} # root of .hg/
930 937 # we must avoid cyclic reference between repo and transaction.
931 938 reporef = weakref.ref(self)
932 939 def validate(tr):
933 940 """will run pre-closing hooks"""
934 941 pending = lambda: tr.writepending() and self.root or ""
935 942 reporef().hook('pretxnclose', throw=True, pending=pending,
936 943 txnname=desc)
937 944
938 945 tr = transaction.transaction(rp, self.sopener, vfsmap,
939 946 "journal",
940 947 "undo",
941 948 aftertrans(renames),
942 949 self.store.createmode,
943 950 validator=validate)
944 951 # note: writing the fncache only during finalize means that the file is
945 952 # outdated when running hooks. As fncache is used for streaming clone,
946 953 # this is not expected to break anything that happens during the hooks.
947 954 tr.addfinalize('flush-fncache', self.store.write)
948 955 def txnclosehook(tr2):
949 956 """To be run if transaction is successful, will schedule a hook run
950 957 """
951 958 def hook():
952 959 reporef().hook('txnclose', throw=False, txnname=desc,
953 960 **tr2.hookargs)
954 961 reporef()._afterlock(hook)
955 962 tr.addfinalize('txnclose-hook', txnclosehook)
956 963 self._transref = weakref.ref(tr)
957 964 return tr
958 965
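The reporef dance above exists because the transaction holds callbacks that refer back to the repo; strong references in both directions would create a cycle keeping both alive. A minimal standalone sketch of the pattern (hypothetical Owner class):

import weakref

class Owner(object):
    name = 'demo'
    def makehook(self):
        ref = weakref.ref(self)          # weak: no owner <-> hook cycle
        def hook():
            owner = ref()                # None once the owner is collected
            return owner.name if owner is not None else None
        return hook

owner = Owner()
hook = owner.makehook()
assert hook() == 'demo'
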
959 966 def _journalfiles(self):
960 967 return ((self.svfs, 'journal'),
961 968 (self.vfs, 'journal.dirstate'),
962 969 (self.vfs, 'journal.branch'),
963 970 (self.vfs, 'journal.desc'),
964 971 (self.vfs, 'journal.bookmarks'),
965 972 (self.svfs, 'journal.phaseroots'))
966 973
967 974 def undofiles(self):
968 975 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
969 976
970 977 def _writejournal(self, desc):
971 978 self.vfs.write("journal.dirstate",
972 979 self.vfs.tryread("dirstate"))
973 980 self.vfs.write("journal.branch",
974 981 encoding.fromlocal(self.dirstate.branch()))
975 982 self.vfs.write("journal.desc",
976 983 "%d\n%s\n" % (len(self), desc))
977 984 self.vfs.write("journal.bookmarks",
978 985 self.vfs.tryread("bookmarks"))
979 986 self.svfs.write("journal.phaseroots",
980 987 self.svfs.tryread("phaseroots"))
981 988
982 989 def recover(self):
983 990 lock = self.lock()
984 991 try:
985 992 if self.svfs.exists("journal"):
986 993 self.ui.status(_("rolling back interrupted transaction\n"))
987 994 vfsmap = {'': self.svfs,
988 995 'plain': self.vfs,}
989 996 transaction.rollback(self.svfs, vfsmap, "journal",
990 997 self.ui.warn)
991 998 self.invalidate()
992 999 return True
993 1000 else:
994 1001 self.ui.warn(_("no interrupted transaction available\n"))
995 1002 return False
996 1003 finally:
997 1004 lock.release()
998 1005
999 1006 def rollback(self, dryrun=False, force=False):
1000 1007 wlock = lock = None
1001 1008 try:
1002 1009 wlock = self.wlock()
1003 1010 lock = self.lock()
1004 1011 if self.svfs.exists("undo"):
1005 1012 return self._rollback(dryrun, force)
1006 1013 else:
1007 1014 self.ui.warn(_("no rollback information available\n"))
1008 1015 return 1
1009 1016 finally:
1010 1017 release(lock, wlock)
1011 1018
1012 1019 @unfilteredmethod # Until we get smarter cache management
1013 1020 def _rollback(self, dryrun, force):
1014 1021 ui = self.ui
1015 1022 try:
1016 1023 args = self.vfs.read('undo.desc').splitlines()
1017 1024 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1018 1025 if len(args) >= 3:
1019 1026 detail = args[2]
1020 1027 oldtip = oldlen - 1
1021 1028
1022 1029 if detail and ui.verbose:
1023 1030 msg = (_('repository tip rolled back to revision %s'
1024 1031 ' (undo %s: %s)\n')
1025 1032 % (oldtip, desc, detail))
1026 1033 else:
1027 1034 msg = (_('repository tip rolled back to revision %s'
1028 1035 ' (undo %s)\n')
1029 1036 % (oldtip, desc))
1030 1037 except IOError:
1031 1038 msg = _('rolling back unknown transaction\n')
1032 1039 desc = None
1033 1040
1034 1041 if not force and self['.'] != self['tip'] and desc == 'commit':
1035 1042 raise util.Abort(
1036 1043 _('rollback of last commit while not checked out '
1037 1044 'may lose data'), hint=_('use -f to force'))
1038 1045
1039 1046 ui.status(msg)
1040 1047 if dryrun:
1041 1048 return 0
1042 1049
1043 1050 parents = self.dirstate.parents()
1044 1051 self.destroying()
1045 1052 vfsmap = {'plain': self.vfs, '': self.svfs}
1046 1053 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
1047 1054 if self.vfs.exists('undo.bookmarks'):
1048 1055 self.vfs.rename('undo.bookmarks', 'bookmarks')
1049 1056 if self.svfs.exists('undo.phaseroots'):
1050 1057 self.svfs.rename('undo.phaseroots', 'phaseroots')
1051 1058 self.invalidate()
1052 1059
1053 1060 parentgone = (parents[0] not in self.changelog.nodemap or
1054 1061 parents[1] not in self.changelog.nodemap)
1055 1062 if parentgone:
1056 1063 self.vfs.rename('undo.dirstate', 'dirstate')
1057 1064 try:
1058 1065 branch = self.vfs.read('undo.branch')
1059 1066 self.dirstate.setbranch(encoding.tolocal(branch))
1060 1067 except IOError:
1061 1068 ui.warn(_('named branch could not be reset: '
1062 1069 'current branch is still \'%s\'\n')
1063 1070 % self.dirstate.branch())
1064 1071
1065 1072 self.dirstate.invalidate()
1066 1073 parents = tuple([p.rev() for p in self.parents()])
1067 1074 if len(parents) > 1:
1068 1075 ui.status(_('working directory now based on '
1069 1076 'revisions %d and %d\n') % parents)
1070 1077 else:
1071 1078 ui.status(_('working directory now based on '
1072 1079 'revision %d\n') % parents)
1073 1080 # TODO: if we know which new heads may result from this rollback, pass
1074 1081 # them to destroy(), which will prevent the branchhead cache from being
1075 1082 # invalidated.
1076 1083 self.destroyed()
1077 1084 return 0
1078 1085
1079 1086 def invalidatecaches(self):
1080 1087
1081 1088 if '_tagscache' in vars(self):
1082 1089 # can't use delattr on proxy
1083 1090 del self.__dict__['_tagscache']
1084 1091
1085 1092 self.unfiltered()._branchcaches.clear()
1086 1093 self.invalidatevolatilesets()
1087 1094
1088 1095 def invalidatevolatilesets(self):
1089 1096 self.filteredrevcache.clear()
1090 1097 obsolete.clearobscaches(self)
1091 1098
1092 1099 def invalidatedirstate(self):
1093 1100 '''Invalidates the dirstate, causing the next call to dirstate
1094 1101 to check if it was modified since the last time it was read,
1095 1102 rereading it if it has.
1096 1103
1097 1104 This is different from dirstate.invalidate() in that it doesn't
1098 1105 always reread the dirstate. Use dirstate.invalidate() if you want to
1099 1106 explicitly read the dirstate again (i.e. restoring it to a previous
1100 1107 known good state).'''
1101 1108 if hasunfilteredcache(self, 'dirstate'):
1102 1109 for k in self.dirstate._filecache:
1103 1110 try:
1104 1111 delattr(self.dirstate, k)
1105 1112 except AttributeError:
1106 1113 pass
1107 1114 delattr(self.unfiltered(), 'dirstate')
1108 1115
1109 1116 def invalidate(self):
1110 1117 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1111 1118 for k in self._filecache:
1112 1119 # dirstate is invalidated separately in invalidatedirstate()
1113 1120 if k == 'dirstate':
1114 1121 continue
1115 1122
1116 1123 try:
1117 1124 delattr(unfiltered, k)
1118 1125 except AttributeError:
1119 1126 pass
1120 1127 self.invalidatecaches()
1121 1128 self.store.invalidatecaches()
1122 1129
1123 1130 def invalidateall(self):
1124 1131 '''Fully invalidates both store and non-store parts, causing the
1125 1132 subsequent operation to reread any outside changes.'''
1126 1133 # extension should hook this to invalidate its caches
1127 1134 self.invalidate()
1128 1135 self.invalidatedirstate()
1129 1136
1130 1137 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
1131 1138 try:
1132 1139 l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
1133 1140 except error.LockHeld, inst:
1134 1141 if not wait:
1135 1142 raise
1136 1143 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1137 1144 (desc, inst.locker))
1138 1145 # default to 600 seconds timeout
1139 1146 l = lockmod.lock(vfs, lockname,
1140 1147 int(self.ui.config("ui", "timeout", "600")),
1141 1148 releasefn, desc=desc)
1142 1149 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1143 1150 if acquirefn:
1144 1151 acquirefn()
1145 1152 return l
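# A minimal sketch of the configuration the timeout above honors
# (assuming standard hgrc syntax; the 600-second default applies when
# nothing is set):
#
#     [ui]
#     timeout = 30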
1146 1153
1147 1154 def _afterlock(self, callback):
1148 1155 """add a callback to the current repository lock.
1149 1156
1150 1157 The callback will be executed on lock release."""
1151 1158 l = self._lockref and self._lockref()
1152 1159 if l:
1153 1160 l.postrelease.append(callback)
1154 1161 else:
1155 1162 callback()
1156 1163
1157 1164 def lock(self, wait=True):
1158 1165 '''Lock the repository store (.hg/store) and return a weak reference
1159 1166 to the lock. Use this before modifying the store (e.g. committing or
1160 1167 stripping). If you are opening a transaction, get a lock as well.'''
1161 1168 l = self._lockref and self._lockref()
1162 1169 if l is not None and l.held:
1163 1170 l.lock()
1164 1171 return l
1165 1172
1166 1173 def unlock():
1167 1174 for k, ce in self._filecache.items():
1168 1175 if k == 'dirstate' or k not in self.__dict__:
1169 1176 continue
1170 1177 ce.refresh()
1171 1178
1172 1179 l = self._lock(self.svfs, "lock", wait, unlock,
1173 1180 self.invalidate, _('repository %s') % self.origroot)
1174 1181 self._lockref = weakref.ref(l)
1175 1182 return l
1176 1183
1177 1184 def wlock(self, wait=True):
1178 1185 '''Lock the non-store parts of the repository (everything under
1179 1186 .hg except .hg/store) and return a weak reference to the lock.
1180 1187 Use this before modifying files in .hg.'''
1181 1188 l = self._wlockref and self._wlockref()
1182 1189 if l is not None and l.held:
1183 1190 l.lock()
1184 1191 return l
1185 1192
1186 1193 def unlock():
1187 1194 if self.dirstate.pendingparentchange():
1188 1195 self.dirstate.invalidate()
1189 1196 else:
1190 1197 self.dirstate.write()
1191 1198
1192 1199 self._filecache['dirstate'].refresh()
1193 1200
1194 1201 l = self._lock(self.vfs, "wlock", wait, unlock,
1195 1202 self.invalidatedirstate, _('working directory of %s') %
1196 1203 self.origroot)
1197 1204 self._wlockref = weakref.ref(l)
1198 1205 return l
1199 1206
1200 1207 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1201 1208 """
1202 1209 commit an individual file as part of a larger transaction
1203 1210 """
1204 1211
1205 1212 fname = fctx.path()
1206 1213 text = fctx.data()
1207 1214 flog = self.file(fname)
1208 1215 fparent1 = manifest1.get(fname, nullid)
1209 1216 fparent2 = manifest2.get(fname, nullid)
1210 1217
1211 1218 meta = {}
1212 1219 copy = fctx.renamed()
1213 1220 if copy and copy[0] != fname:
1214 1221 # Mark the new revision of this file as a copy of another
1215 1222 # file. This copy data will effectively act as a parent
1216 1223 # of this new revision. If this is a merge, the first
1217 1224 # parent will be the nullid (meaning "look up the copy data")
1218 1225 # and the second one will be the other parent. For example:
1219 1226 #
1220 1227 # 0 --- 1 --- 3 rev1 changes file foo
1221 1228 # \ / rev2 renames foo to bar and changes it
1222 1229 # \- 2 -/ rev3 should have bar with all changes and
1223 1230 # should record that bar descends from
1224 1231 # bar in rev2 and foo in rev1
1225 1232 #
1226 1233 # this allows this merge to succeed:
1227 1234 #
1228 1235 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1229 1236 # \ / merging rev3 and rev4 should use bar@rev2
1230 1237 # \- 2 --- 4 as the merge base
1231 1238 #
1232 1239
1233 1240 cfname = copy[0]
1234 1241 crev = manifest1.get(cfname)
1235 1242 newfparent = fparent2
1236 1243
1237 1244 if manifest2: # branch merge
1238 1245 if fparent2 == nullid or crev is None: # copied on remote side
1239 1246 if cfname in manifest2:
1240 1247 crev = manifest2[cfname]
1241 1248 newfparent = fparent1
1242 1249
1243 1250 # Here, we used to search backwards through history to try to find
1244 1251 # where the file copy came from if the source of a copy was not in
1245 1252 # the parent directory. However, this doesn't actually make sense to
1246 1253 # do (what does a copy from something not in your working copy even
1247 1254 # mean?) and it causes bugs (e.g. issue4476). Instead, we will warn
1248 1255 # the user that copy information was dropped, so if they didn't
1249 1256 # expect this outcome it can be fixed, but this is the correct
1250 1257 # behavior in this circumstance.
1251 1258
1252 1259 if crev:
1253 1260 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1254 1261 meta["copy"] = cfname
1255 1262 meta["copyrev"] = hex(crev)
1256 1263 fparent1, fparent2 = nullid, newfparent
1257 1264 else:
1258 1265 self.ui.warn(_("warning: can't find ancestor for '%s' "
1259 1266 "copied from '%s'!\n") % (fname, cfname))
1260 1267
1261 1268 elif fparent1 == nullid:
1262 1269 fparent1, fparent2 = fparent2, nullid
1263 1270 elif fparent2 != nullid:
1264 1271 # is one parent an ancestor of the other?
1265 1272 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1266 1273 if fparent1 in fparentancestors:
1267 1274 fparent1, fparent2 = fparent2, nullid
1268 1275 elif fparent2 in fparentancestors:
1269 1276 fparent2 = nullid
1270 1277
1271 1278 # is the file changed?
1272 1279 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1273 1280 changelist.append(fname)
1274 1281 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1275 1282 # are just the flags changed during merge?
1276 1283 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1277 1284 changelist.append(fname)
1278 1285
1279 1286 return fparent1
1280 1287
1281 1288 @unfilteredmethod
1282 1289 def commit(self, text="", user=None, date=None, match=None, force=False,
1283 1290 editor=False, extra={}):
1284 1291 """Add a new revision to current repository.
1285 1292
1286 1293 Revision information is gathered from the working directory,
1287 1294 match can be used to filter the committed files. If editor is
1288 1295 supplied, it is called to get a commit message.
1289 1296 """
1290 1297
1291 1298 def fail(f, msg):
1292 1299 raise util.Abort('%s: %s' % (f, msg))
1293 1300
1294 1301 if not match:
1295 1302 match = matchmod.always(self.root, '')
1296 1303
1297 1304 if not force:
1298 1305 vdirs = []
1299 1306 match.explicitdir = vdirs.append
1300 1307 match.bad = fail
1301 1308
1302 1309 wlock = self.wlock()
1303 1310 try:
1304 1311 wctx = self[None]
1305 1312 merge = len(wctx.parents()) > 1
1306 1313
1307 1314 if (not force and merge and match and
1308 1315 (match.files() or match.anypats())):
1309 1316 raise util.Abort(_('cannot partially commit a merge '
1310 1317 '(do not specify files or patterns)'))
1311 1318
1312 1319 status = self.status(match=match, clean=force)
1313 1320 if force:
1314 1321 status.modified.extend(status.clean) # mq may commit clean files
1315 1322
1316 1323 # check subrepos
1317 1324 subs = []
1318 1325 commitsubs = set()
1319 1326 newstate = wctx.substate.copy()
1320 1327 # only manage subrepos and .hgsubstate if .hgsub is present
1321 1328 if '.hgsub' in wctx:
1322 1329 # we'll decide whether to track this ourselves, thanks
1323 1330 for c in status.modified, status.added, status.removed:
1324 1331 if '.hgsubstate' in c:
1325 1332 c.remove('.hgsubstate')
1326 1333
1327 1334 # compare current state to last committed state
1328 1335 # build new substate based on last committed state
1329 1336 oldstate = wctx.p1().substate
1330 1337 for s in sorted(newstate.keys()):
1331 1338 if not match(s):
1332 1339 # ignore working copy, use old state if present
1333 1340 if s in oldstate:
1334 1341 newstate[s] = oldstate[s]
1335 1342 continue
1336 1343 if not force:
1337 1344 raise util.Abort(
1338 1345 _("commit with new subrepo %s excluded") % s)
1339 1346 if wctx.sub(s).dirty(True):
1340 1347 if not self.ui.configbool('ui', 'commitsubrepos'):
1341 1348 raise util.Abort(
1342 1349 _("uncommitted changes in subrepo %s") % s,
1343 1350 hint=_("use --subrepos for recursive commit"))
1344 1351 subs.append(s)
1345 1352 commitsubs.add(s)
1346 1353 else:
1347 1354 bs = wctx.sub(s).basestate()
1348 1355 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1349 1356 if oldstate.get(s, (None, None, None))[1] != bs:
1350 1357 subs.append(s)
1351 1358
1352 1359 # check for removed subrepos
1353 1360 for p in wctx.parents():
1354 1361 r = [s for s in p.substate if s not in newstate]
1355 1362 subs += [s for s in r if match(s)]
1356 1363 if subs:
1357 1364 if (not match('.hgsub') and
1358 1365 '.hgsub' in (wctx.modified() + wctx.added())):
1359 1366 raise util.Abort(
1360 1367 _("can't commit subrepos without .hgsub"))
1361 1368 status.modified.insert(0, '.hgsubstate')
1362 1369
1363 1370 elif '.hgsub' in status.removed:
1364 1371 # clean up .hgsubstate when .hgsub is removed
1365 1372 if ('.hgsubstate' in wctx and
1366 1373 '.hgsubstate' not in (status.modified + status.added +
1367 1374 status.removed)):
1368 1375 status.removed.insert(0, '.hgsubstate')
1369 1376
1370 1377 # make sure all explicit patterns are matched
1371 1378 if not force and match.files():
1372 1379 matched = set(status.modified + status.added + status.removed)
1373 1380
1374 1381 for f in match.files():
1375 1382 f = self.dirstate.normalize(f)
1376 1383 if f == '.' or f in matched or f in wctx.substate:
1377 1384 continue
1378 1385 if f in status.deleted:
1379 1386 fail(f, _('file not found!'))
1380 1387 if f in vdirs: # visited directory
1381 1388 d = f + '/'
1382 1389 for mf in matched:
1383 1390 if mf.startswith(d):
1384 1391 break
1385 1392 else:
1386 1393 fail(f, _("no match under directory!"))
1387 1394 elif f not in self.dirstate:
1388 1395 fail(f, _("file not tracked!"))
1389 1396
1390 1397 cctx = context.workingcommitctx(self, status,
1391 1398 text, user, date, extra)
1392 1399
1393 1400 if (not force and not extra.get("close") and not merge
1394 1401 and not cctx.files()
1395 1402 and wctx.branch() == wctx.p1().branch()):
1396 1403 return None
1397 1404
1398 1405 if merge and cctx.deleted():
1399 1406 raise util.Abort(_("cannot commit merge with missing files"))
1400 1407
1401 1408 ms = mergemod.mergestate(self)
1402 1409 for f in status.modified:
1403 1410 if f in ms and ms[f] == 'u':
1404 1411 raise util.Abort(_('unresolved merge conflicts '
1405 1412 '(see "hg help resolve")'))
1406 1413
1407 1414 if editor:
1408 1415 cctx._text = editor(self, cctx, subs)
1409 1416 edited = (text != cctx._text)
1410 1417
1411 1418 # Save commit message in case this transaction gets rolled back
1412 1419 # (e.g. by a pretxncommit hook). Leave the content alone on
1413 1420 # the assumption that the user will use the same editor again.
1414 1421 msgfn = self.savecommitmessage(cctx._text)
1415 1422
1416 1423 # commit subs and write new state
1417 1424 if subs:
1418 1425 for s in sorted(commitsubs):
1419 1426 sub = wctx.sub(s)
1420 1427 self.ui.status(_('committing subrepository %s\n') %
1421 1428 subrepo.subrelpath(sub))
1422 1429 sr = sub.commit(cctx._text, user, date)
1423 1430 newstate[s] = (newstate[s][0], sr)
1424 1431 subrepo.writestate(self, newstate)
1425 1432
1426 1433 p1, p2 = self.dirstate.parents()
1427 1434 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1428 1435 try:
1429 1436 self.hook("precommit", throw=True, parent1=hookp1,
1430 1437 parent2=hookp2)
1431 1438 ret = self.commitctx(cctx, True)
1432 1439 except: # re-raises
1433 1440 if edited:
1434 1441 self.ui.write(
1435 1442 _('note: commit message saved in %s\n') % msgfn)
1436 1443 raise
1437 1444
1438 1445 # update bookmarks, dirstate and mergestate
1439 1446 bookmarks.update(self, [p1, p2], ret)
1440 1447 cctx.markcommitted(ret)
1441 1448 ms.reset()
1442 1449 finally:
1443 1450 wlock.release()
1444 1451
1445 1452 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1446 1453 # hack for commands that use a temporary commit (e.g. histedit):
1447 1454 # the temporary commit may have been stripped before the hook runs
1448 1455 if node in self:
1449 1456 self.hook("commit", node=node, parent1=parent1,
1450 1457 parent2=parent2)
1451 1458 self._afterlock(commithook)
1452 1459 return ret
1453 1460
1454 1461 @unfilteredmethod
1455 1462 def commitctx(self, ctx, error=False):
1456 1463 """Add a new revision to current repository.
1457 1464 Revision information is passed via the context argument.
1458 1465 """
1459 1466
1460 1467 tr = None
1461 1468 p1, p2 = ctx.p1(), ctx.p2()
1462 1469 user = ctx.user()
1463 1470
1464 1471 lock = self.lock()
1465 1472 try:
1466 1473 tr = self.transaction("commit")
1467 1474 trp = weakref.proxy(tr)
1468 1475
1469 1476 if ctx.files():
1470 1477 m1 = p1.manifest()
1471 1478 m2 = p2.manifest()
1472 1479 m = m1.copy()
1473 1480
1474 1481 # check in files
1475 1482 added = []
1476 1483 changed = []
1477 1484 removed = list(ctx.removed())
1478 1485 linkrev = len(self)
1479 1486 self.ui.note(_("committing files:\n"))
1480 1487 for f in sorted(ctx.modified() + ctx.added()):
1481 1488 self.ui.note(f + "\n")
1482 1489 try:
1483 1490 fctx = ctx[f]
1484 1491 if fctx is None:
1485 1492 removed.append(f)
1486 1493 else:
1487 1494 added.append(f)
1488 1495 m[f] = self._filecommit(fctx, m1, m2, linkrev,
1489 1496 trp, changed)
1490 1497 m.setflag(f, fctx.flags())
1491 1498 except OSError, inst:
1492 1499 self.ui.warn(_("trouble committing %s!\n") % f)
1493 1500 raise
1494 1501 except IOError, inst:
1495 1502 errcode = getattr(inst, 'errno', errno.ENOENT)
1496 1503 if error or errcode and errcode != errno.ENOENT:
1497 1504 self.ui.warn(_("trouble committing %s!\n") % f)
1498 1505 raise
1499 1506
1500 1507 # update manifest
1501 1508 self.ui.note(_("committing manifest\n"))
1502 1509 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1503 1510 drop = [f for f in removed if f in m]
1504 1511 for f in drop:
1505 1512 del m[f]
1506 1513 mn = self.manifest.add(m, trp, linkrev,
1507 1514 p1.manifestnode(), p2.manifestnode(),
1508 1515 added, drop)
1509 1516 files = changed + removed
1510 1517 else:
1511 1518 mn = p1.manifestnode()
1512 1519 files = []
1513 1520
1514 1521 # update changelog
1515 1522 self.ui.note(_("committing changelog\n"))
1516 1523 self.changelog.delayupdate(tr)
1517 1524 n = self.changelog.add(mn, files, ctx.description(),
1518 1525 trp, p1.node(), p2.node(),
1519 1526 user, ctx.date(), ctx.extra().copy())
1520 1527 p = lambda: tr.writepending() and self.root or ""
1521 1528 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1522 1529 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1523 1530 parent2=xp2, pending=p)
1524 1531 # set the new commit in its proper phase
1525 1532 targetphase = subrepo.newcommitphase(self.ui, ctx)
1526 1533 if targetphase:
1527 1534 # retracting the boundary does not alter parent changesets:
1528 1535 # if a parent has a higher phase, the resulting phase will be
1529 1536 # compliant anyway (phases only increase along the DAG, so a
1530 1537 # child of a secret parent stays secret)
1531 1538 # if the minimal phase was 0 we don't need to retract anything
1532 1539 phases.retractboundary(self, tr, targetphase, [n])
1533 1540 tr.close()
1534 1541 branchmap.updatecache(self.filtered('served'))
1535 1542 return n
1536 1543 finally:
1537 1544 if tr:
1538 1545 tr.release()
1539 1546 lock.release()
1540 1547
1541 1548 @unfilteredmethod
1542 1549 def destroying(self):
1543 1550 '''Inform the repository that nodes are about to be destroyed.
1544 1551 Intended for use by strip and rollback, so there's a common
1545 1552 place for anything that has to be done before destroying history.
1546 1553
1547 1554 This is mostly useful for saving state that is in memory and waiting
1548 1555 to be flushed when the current lock is released. Because a call to
1549 1556 destroyed is imminent, the repo will be invalidated causing those
1550 1557 changes to stay in memory (waiting for the next unlock), or vanish
1551 1558 completely.
1552 1559 '''
1553 1560 # When using the same lock to commit and strip, the phasecache is left
1554 1561 # dirty after committing. Then when we strip, the repo is invalidated,
1555 1562 # causing those changes to disappear.
1556 1563 if '_phasecache' in vars(self):
1557 1564 self._phasecache.write()
1558 1565
1559 1566 @unfilteredmethod
1560 1567 def destroyed(self):
1561 1568 '''Inform the repository that nodes have been destroyed.
1562 1569 Intended for use by strip and rollback, so there's a common
1563 1570 place for anything that has to be done after destroying history.
1564 1571 '''
1565 1572 # When one tries to:
1566 1573 # 1) destroy nodes thus calling this method (e.g. strip)
1567 1574 # 2) use phasecache somewhere (e.g. commit)
1568 1575 #
1569 1576 # then 2) will fail because the phasecache contains nodes that were
1570 1577 # removed. We can either remove phasecache from the filecache,
1571 1578 # causing it to reload next time it is accessed, or simply filter
1572 1579 # the removed nodes now and write the updated cache.
1573 1580 self._phasecache.filterunknown(self)
1574 1581 self._phasecache.write()
1575 1582
1576 1583 # update the 'served' branch cache to help read-only server processes
1577 1584 # Thanks to branchcache collaboration, this is done from the nearest
1578 1585 # filtered subset and is expected to be fast.
1579 1586 branchmap.updatecache(self.filtered('served'))
1580 1587
1581 1588 # Ensure the persistent tag cache is updated. Doing it now
1582 1589 # means that the tag cache only has to worry about destroyed
1583 1590 # heads immediately after a strip/rollback. That in turn
1584 1591 # guarantees that "cachetip == currenttip" (comparing both rev
1585 1592 # and node) always means no nodes have been added or destroyed.
1586 1593
1587 1594 # XXX this is suboptimal when qrefresh'ing: we strip the current
1588 1595 # head, refresh the tag cache, then immediately add a new head.
1589 1596 # But I think doing it this way is necessary for the "instant
1590 1597 # tag cache retrieval" case to work.
1591 1598 self.invalidate()
1592 1599
1593 1600 def walk(self, match, node=None):
1594 1601 '''
1595 1602 walk recursively through the directory tree or a given
1596 1603 changeset, finding all files matched by the match
1597 1604 function
1598 1605 '''
1599 1606 return self[node].walk(match)
1600 1607
1601 1608 def status(self, node1='.', node2=None, match=None,
1602 1609 ignored=False, clean=False, unknown=False,
1603 1610 listsubrepos=False):
1604 1611 '''a convenience method that calls node1.status(node2)'''
1605 1612 return self[node1].status(node2, match, ignored, clean, unknown,
1606 1613 listsubrepos)
1607 1614
1608 1615 def heads(self, start=None):
1609 1616 heads = self.changelog.heads(start)
1610 1617 # sort the output in rev descending order
1611 1618 return sorted(heads, key=self.changelog.rev, reverse=True)
1612 1619
1613 1620 def branchheads(self, branch=None, start=None, closed=False):
1614 1621 '''return a (possibly filtered) list of heads for the given branch
1615 1622
1616 1623 Heads are returned in topological order, from newest to oldest.
1617 1624 If branch is None, use the dirstate branch.
1618 1625 If start is not None, return only heads reachable from start.
1619 1626 If closed is True, return heads that are marked as closed as well.
1620 1627 '''
1621 1628 if branch is None:
1622 1629 branch = self[None].branch()
1623 1630 branches = self.branchmap()
1624 1631 if branch not in branches:
1625 1632 return []
1626 1633 # the cache returns heads ordered lowest to highest
1627 1634 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1628 1635 if start is not None:
1629 1636 # filter out the heads that cannot be reached from startrev
1630 1637 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1631 1638 bheads = [h for h in bheads if h in fbheads]
1632 1639 return bheads
1633 1640
1634 1641 def branches(self, nodes):
1635 1642 if not nodes:
1636 1643 nodes = [self.changelog.tip()]
1637 1644 b = []
1638 1645 for n in nodes:
1639 1646 t = n
1640 1647 while True:
1641 1648 p = self.changelog.parents(n)
1642 1649 if p[1] != nullid or p[0] == nullid:
1643 1650 b.append((t, n, p[0], p[1]))
1644 1651 break
1645 1652 n = p[0]
1646 1653 return b
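# Sketch of the shape returned above (restating the loop, no new
# behavior assumed): for each requested node t, the walk follows first
# parents until it reaches a merge or a root n (possibly t itself) and
# yields the tuple
#
#     (t, n, p1-of-n, p2-of-n)
#
# describing that linear run of history.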
1647 1654
1648 1655 def between(self, pairs):
1649 1656 r = []
1650 1657
1651 1658 for top, bottom in pairs:
1652 1659 n, l, i = top, [], 0
1653 1660 f = 1
1654 1661
1655 1662 while n != bottom and n != nullid:
1656 1663 p = self.changelog.parents(n)[0]
1657 1664 if i == f:
1658 1665 l.append(n)
1659 1666 f = f * 2
1660 1667 n = p
1661 1668 i += 1
1662 1669
1663 1670 r.append(l)
1664 1671
1665 1672 return r
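# A worked example of the sampling above (a sketch assuming a linear
# history a0 -> a1 -> ... -> a9 with top=a9, bottom=a0): the nodes
# appended to l sit 1, 2, 4, 8, ... first parents below top, i.e.
#
#     l == [a8, a7, a5, a1]
#
# which lets the wire protocol bisect long stretches of history cheaply.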
1666 1673
1667 1674 def checkpush(self, pushop):
1668 1675 """Extensions can override this function if additional checks have
1669 1676 to be performed before pushing, or call it if they override the
1670 1677 push command.
1671 1678 """
1672 1679 pass
1673 1680
1674 1681 @unfilteredpropertycache
1675 1682 def prepushoutgoinghooks(self):
1676 1683 """Return util.hooks consists of "(repo, remote, outgoing)"
1677 1684 functions, which are called before pushing changesets.
1678 1685 """
1679 1686 return util.hooks()
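# Illustrative use (assuming the util.hooks add/call interface): an
# extension could register a pre-push check with
#
#     repo.prepushoutgoinghooks.add('myext', checkfn)
#
# where the hypothetical checkfn(repo, remote, outgoing) raises
# util.Abort to block the push.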
1680 1687
1681 1688 def stream_in(self, remote, requirements):
1682 1689 lock = self.lock()
1683 1690 try:
1684 1691 # Save remote branchmap. We will use it later
1685 1692 # to speed up branchcache creation
1686 1693 rbranchmap = None
1687 1694 if remote.capable("branchmap"):
1688 1695 rbranchmap = remote.branchmap()
1689 1696
1690 1697 fp = remote.stream_out()
1691 1698 l = fp.readline()
1692 1699 try:
1693 1700 resp = int(l)
1694 1701 except ValueError:
1695 1702 raise error.ResponseError(
1696 1703 _('unexpected response from remote server:'), l)
1697 1704 if resp == 1:
1698 1705 raise util.Abort(_('operation forbidden by server'))
1699 1706 elif resp == 2:
1700 1707 raise util.Abort(_('locking the remote repository failed'))
1701 1708 elif resp != 0:
1702 1709 raise util.Abort(_('the server sent an unknown error code'))
1703 1710 self.ui.status(_('streaming all changes\n'))
1704 1711 l = fp.readline()
1705 1712 try:
1706 1713 total_files, total_bytes = map(int, l.split(' ', 1))
1707 1714 except (ValueError, TypeError):
1708 1715 raise error.ResponseError(
1709 1716 _('unexpected response from remote server:'), l)
1710 1717 self.ui.status(_('%d files to transfer, %s of data\n') %
1711 1718 (total_files, util.bytecount(total_bytes)))
1712 1719 handled_bytes = 0
1713 1720 self.ui.progress(_('clone'), 0, total=total_bytes)
1714 1721 start = time.time()
1715 1722
1716 1723 tr = self.transaction(_('clone'))
1717 1724 try:
1718 1725 for i in xrange(total_files):
1719 1726 # XXX doesn't support '\n' or '\r' in filenames
1720 1727 l = fp.readline()
1721 1728 try:
1722 1729 name, size = l.split('\0', 1)
1723 1730 size = int(size)
1724 1731 except (ValueError, TypeError):
1725 1732 raise error.ResponseError(
1726 1733 _('unexpected response from remote server:'), l)
1727 1734 if self.ui.debugflag:
1728 1735 self.ui.debug('adding %s (%s)\n' %
1729 1736 (name, util.bytecount(size)))
1730 1737 # for backwards compat, name was partially encoded
1731 1738 ofp = self.svfs(store.decodedir(name), 'w')
1732 1739 for chunk in util.filechunkiter(fp, limit=size):
1733 1740 handled_bytes += len(chunk)
1734 1741 self.ui.progress(_('clone'), handled_bytes,
1735 1742 total=total_bytes)
1736 1743 ofp.write(chunk)
1737 1744 ofp.close()
1738 1745 tr.close()
1739 1746 finally:
1740 1747 tr.release()
1741 1748
1742 1749 # Writing straight to files circumvented the in-memory caches
1743 1750 self.invalidate()
1744 1751
1745 1752 elapsed = time.time() - start
1746 1753 if elapsed <= 0:
1747 1754 elapsed = 0.001
1748 1755 self.ui.progress(_('clone'), None)
1749 1756 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1750 1757 (util.bytecount(total_bytes), elapsed,
1751 1758 util.bytecount(total_bytes / elapsed)))
1752 1759
1753 1760 # new requirements = old non-format requirements +
1754 1761 # new format-related requirements
1755 1762 # from the streamed-in repository
1756 1763 requirements.update(set(self.requirements) - self.supportedformats)
1757 1764 self._applyrequirements(requirements)
1758 1765 self._writerequirements()
1759 1766
1760 1767 if rbranchmap:
1761 1768 rbheads = []
1762 1769 closed = []
1763 1770 for bheads in rbranchmap.itervalues():
1764 1771 rbheads.extend(bheads)
1765 1772 for h in bheads:
1766 1773 r = self.changelog.rev(h)
1767 1774 b, c = self.changelog.branchinfo(r)
1768 1775 if c:
1769 1776 closed.append(h)
1770 1777
1771 1778 if rbheads:
1772 1779 rtiprev = max((int(self.changelog.rev(node))
1773 1780 for node in rbheads))
1774 1781 cache = branchmap.branchcache(rbranchmap,
1775 1782 self[rtiprev].node(),
1776 1783 rtiprev,
1777 1784 closednodes=closed)
1778 1785 # Try to stick it as low as possible
1779 1786 # filters above served are unlikely to be fetched from a clone
1780 1787 for candidate in ('base', 'immutable', 'served'):
1781 1788 rview = self.filtered(candidate)
1782 1789 if cache.validfor(rview):
1783 1790 self._branchcaches[candidate] = cache
1784 1791 cache.write(rview)
1785 1792 break
1786 1793 self.invalidate()
1787 1794 return len(self.heads()) + 1
1788 1795 finally:
1789 1796 lock.release()
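# Wire format consumed above, as implied by the parsing code: one status
# line ('0' on success), one '<total_files> <total_bytes>' line, then per
# file a '<store path>\0<size>' header followed by exactly <size> bytes
# of raw revlog data. A minimal client-side sketch of that loop
# (hypothetical helper, not part of this change):
#
#     def read_stream_files(fp):
#         total_files, total_bytes = map(int, fp.readline().split(' ', 1))
#         for _ in xrange(total_files):
#             name, size = fp.readline().split('\0', 1)
#             yield name, fp.read(int(size))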
1790 1797
1791 1798 def clone(self, remote, heads=[], stream=None):
1792 1799 '''clone remote repository.
1793 1800
1794 1801 keyword arguments:
1795 1802 heads: list of revs to clone (forces use of pull)
1796 1803 stream: use streaming clone if possible'''
1797 1804
1798 1805 # now, all clients that can request uncompressed clones can
1799 1806 # read repo formats supported by all servers that can serve
1800 1807 # them.
1801 1808
1802 1809 # if revlog format changes, client will have to check version
1803 1810 # and format flags on "stream" capability, and use
1804 1811 # uncompressed only if compatible.
1805 1812
1806 1813 if stream is None:
1807 1814 # if the server explicitly prefers to stream (for fast LANs)
1808 1815 stream = remote.capable('stream-preferred')
1809 1816
1810 1817 if stream and not heads:
1811 1818 # 'stream' means remote revlog format is revlogv1 only
1812 1819 if remote.capable('stream'):
1813 1820 self.stream_in(remote, set(('revlogv1',)))
1814 1821 else:
1815 1822 # otherwise, 'streamreqs' contains the remote revlog format
1816 1823 streamreqs = remote.capable('streamreqs')
1817 1824 if streamreqs:
1818 1825 streamreqs = set(streamreqs.split(','))
1819 1826 # if we support it, stream in and adjust our requirements
1820 1827 if not streamreqs - self.supportedformats:
1821 1828 self.stream_in(remote, streamreqs)
1822 1829
1823 1830 quiet = self.ui.backupconfig('ui', 'quietbookmarkmove')
1824 1831 try:
1825 1832 self.ui.setconfig('ui', 'quietbookmarkmove', True, 'clone')
1826 1833 ret = exchange.pull(self, remote, heads).cgresult
1827 1834 finally:
1828 1835 self.ui.restoreconfig(quiet)
1829 1836 return ret
1830 1837
1831 1838 def pushkey(self, namespace, key, old, new):
1832 1839 try:
1833 1840 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
1834 1841 old=old, new=new)
1835 1842 except error.HookAbort, exc:
1836 1843 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
1837 1844 if exc.hint:
1838 1845 self.ui.write_err(_("(%s)\n") % exc.hint)
1839 1846 return False
1840 1847 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1841 1848 ret = pushkey.push(self, namespace, key, old, new)
1842 1849 def runhook():
1843 1850 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1844 1851 ret=ret)
1845 1852 self._afterlock(runhook)
1846 1853 return ret
1847 1854
1848 1855 def listkeys(self, namespace):
1849 1856 self.hook('prelistkeys', throw=True, namespace=namespace)
1850 1857 self.ui.debug('listing keys for "%s"\n' % namespace)
1851 1858 values = pushkey.list(self, namespace)
1852 1859 self.hook('listkeys', namespace=namespace, values=values)
1853 1860 return values
1854 1861
1855 1862 def debugwireargs(self, one, two, three=None, four=None, five=None):
1856 1863 '''used to test argument passing over the wire'''
1857 1864 return "%s %s %s %s %s" % (one, two, three, four, five)
1858 1865
1859 1866 def savecommitmessage(self, text):
1860 1867 fp = self.vfs('last-message.txt', 'wb')
1861 1868 try:
1862 1869 fp.write(text)
1863 1870 finally:
1864 1871 fp.close()
1865 1872 return self.pathto(fp.name[len(self.root) + 1:])
1866 1873
1867 1874 # used to avoid circular references so destructors work
1868 1875 def aftertrans(files):
1869 1876 renamefiles = [tuple(t) for t in files]
1870 1877 def a():
1871 1878 for vfs, src, dest in renamefiles:
1872 1879 try:
1873 1880 vfs.rename(src, dest)
1874 1881 except OSError: # journal file does not yet exist
1875 1882 pass
1876 1883 return a
1877 1884
1878 1885 def undoname(fn):
1879 1886 base, name = os.path.split(fn)
1880 1887 assert name.startswith('journal')
1881 1888 return os.path.join(base, name.replace('journal', 'undo', 1))
1882 1889
1883 1890 def instance(ui, path, create):
1884 1891 return localrepository(ui, util.urllocalpath(path), create)
1885 1892
1886 1893 def islocal(path):
1887 1894 return True
@@ -1,166 +1,167 b''
1 1 # statichttprepo.py - simple http repository class for mercurial
2 2 #
3 3 # This provides read-only repo access to repositories exported via static http
4 4 #
5 5 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 from i18n import _
11 11 import changelog, byterange, url, error, namespaces
12 12 import localrepo, manifest, util, scmutil, store
13 13 import urllib, urllib2, errno, os
14 14
15 15 class httprangereader(object):
16 16 def __init__(self, url, opener):
17 17 # we assume opener has HTTPRangeHandler
18 18 self.url = url
19 19 self.pos = 0
20 20 self.opener = opener
21 21 self.name = url
22 22 def seek(self, pos):
23 23 self.pos = pos
24 24 def read(self, bytes=None):
25 25 req = urllib2.Request(self.url)
26 26 end = ''
27 27 if bytes:
28 28 end = self.pos + bytes - 1
29 29 if self.pos or end:
30 30 req.add_header('Range', 'bytes=%d-%s' % (self.pos, end))
31 31
32 32 try:
33 33 f = self.opener.open(req)
34 34 data = f.read()
35 35 # Python 2.6+ defines a getcode() function, and 2.4 and
36 36 # 2.5 appear to always have an undocumented code attribute
37 37 # set. If we can't read either of those, fall back to 206
38 38 # and hope for the best.
39 39 code = getattr(f, 'getcode', lambda : getattr(f, 'code', 206))()
40 40 except urllib2.HTTPError, inst:
41 41 num = inst.code == 404 and errno.ENOENT or None
42 42 raise IOError(num, inst)
43 43 except urllib2.URLError, inst:
44 44 raise IOError(None, inst.reason[1])
45 45
46 46 if code == 200:
47 47 # HTTPRangeHandler does nothing if the remote does not support
48 48 # Range headers; the full entity comes back, so slice it here.
49 49 if bytes:
50 50 data = data[self.pos:self.pos + bytes]
51 51 else:
52 52 data = data[self.pos:]
53 53 elif bytes:
54 54 data = data[:bytes]
55 55 self.pos += len(data)
56 56 return data
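# Worked example of the request built above (illustrative values): with
# pos=100 and bytes=50, end = 100 + 50 - 1 = 149 and the request carries
#
#     Range: bytes=100-149
#
# If the server ignores the header and answers 200 with the full entity,
# the slicing branch above trims the data back to the requested window.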
57 57 def readlines(self):
58 58 return self.read().splitlines(True)
59 59 def __iter__(self):
60 60 return iter(self.readlines())
61 61 def close(self):
62 62 pass
63 63
64 64 def build_opener(ui, authinfo):
65 65 # urllib cannot handle URLs with embedded user or passwd
66 66 urlopener = url.opener(ui, authinfo)
67 67 urlopener.add_handler(byterange.HTTPRangeHandler())
68 68
69 69 class statichttpvfs(scmutil.abstractvfs):
70 70 def __init__(self, base):
71 71 self.base = base
72 72
73 73 def __call__(self, path, mode='r', *args, **kw):
74 74 if mode not in ('r', 'rb'):
75 75 raise IOError('Permission denied')
76 76 f = "/".join((self.base, urllib.quote(path)))
77 77 return httprangereader(f, urlopener)
78 78
79 79 def join(self, path):
80 80 if path:
81 81 return os.path.join(self.base, path)
82 82 else:
83 83 return self.base
84 84
85 85 return statichttpvfs
86 86
87 87 class statichttppeer(localrepo.localpeer):
88 88 def local(self):
89 89 return None
90 90 def canpush(self):
91 91 return False
92 92
93 93 class statichttprepository(localrepo.localrepository):
94 94 supported = localrepo.localrepository._basesupported
95 95
96 96 def __init__(self, ui, path):
97 97 self._url = path
98 98 self.ui = ui
99 99
100 100 self.root = path
101 101 u = util.url(path.rstrip('/') + "/.hg")
102 102 self.path, authinfo = u.authinfo()
103 103
104 104 opener = build_opener(ui, authinfo)
105 105 self.opener = opener(self.path)
106 106 self.vfs = self.opener
107 107 self._phasedefaults = []
108 108
109 109 self.names = namespaces.namespaces()
110 110
111 111 try:
112 112 requirements = scmutil.readrequires(self.vfs, self.supported)
113 113 except IOError, inst:
114 114 if inst.errno != errno.ENOENT:
115 115 raise
116 116 requirements = set()
117 117
118 118 # check if it is a non-empty old-style repository
119 119 try:
120 120 fp = self.vfs("00changelog.i")
121 121 fp.read(1)
122 122 fp.close()
123 123 except IOError, inst:
124 124 if inst.errno != errno.ENOENT:
125 125 raise
126 126 # we do not care about empty old-style repositories here
127 127 msg = _("'%s' does not appear to be an hg repository") % path
128 128 raise error.RepoError(msg)
129 129
130 130 # setup store
131 131 self.store = store.store(requirements, self.path, opener)
132 132 self.spath = self.store.path
133 133 self.svfs = self.store.opener
134 134 self.sopener = self.svfs
135 135 self.sjoin = self.store.join
136 136 self._filecache = {}
137 137 self.requirements = requirements
138 138
139 139 self.manifest = manifest.manifest(self.svfs)
140 140 self.changelog = changelog.changelog(self.svfs)
141 141 self._tags = None
142 142 self.nodetagscache = None
143 143 self._branchcaches = {}
144 self._revbranchcache = None
144 145 self.encodepats = None
145 146 self.decodepats = None
146 147
147 148 def _restrictcapabilities(self, caps):
148 149 caps = super(statichttprepository, self)._restrictcapabilities(caps)
149 150 return caps.difference(["pushkey"])
150 151
151 152 def url(self):
152 153 return self._url
153 154
154 155 def local(self):
155 156 return False
156 157
157 158 def peer(self):
158 159 return statichttppeer(self)
159 160
160 161 def lock(self, wait=True):
161 162 raise util.Abort(_('cannot lock static-http repository'))
162 163
163 164 def instance(ui, path, create):
164 165 if create:
165 166 raise util.Abort(_('cannot create new static-http repository'))
166 167 return statichttprepository(ui, path[7:])