bookmarks: delegate writing to the repo just like reading...
Augie Fackler - r15237:7196ed7a default
@@ -1,213 +1,213 @@
1 1 # Mercurial bookmark support code
2 2 #
3 3 # Copyright 2008 David Soria Parra <dsp@php.net>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from mercurial.i18n import _
9 9 from mercurial.node import hex
10 10 from mercurial import encoding, error, util
11 11 import errno, os
12 12
13 13 def valid(mark):
14 14 for c in (':', '\0', '\n', '\r'):
15 15 if c in mark:
16 16 return False
17 17 return True
18 18
19 19 def read(repo):
20 20 '''Parse .hg/bookmarks file and return a dictionary
21 21
22 22 Bookmarks are stored as {HASH}\\s{NAME}\\n (localtags format) values
23 23 in the .hg/bookmarks file.
24 24 Read the file and return a (name=>nodeid) dictionary
25 25 '''
26 26 bookmarks = {}
27 27 try:
28 28 for line in repo.opener('bookmarks'):
29 29 line = line.strip()
30 30 if not line:
31 31 continue
32 32 if ' ' not in line:
33 33 repo.ui.warn(_('malformed line in .hg/bookmarks: %r\n') % line)
34 34 continue
35 35 sha, refspec = line.split(' ', 1)
36 36 refspec = encoding.tolocal(refspec)
37 37 try:
38 38 bookmarks[refspec] = repo.changelog.lookup(sha)
39 39 except error.RepoLookupError:
40 40 pass
41 41 except IOError, inst:
42 42 if inst.errno != errno.ENOENT:
43 43 raise
44 44 return bookmarks
45 45
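The docstring above pins down the on-disk format: one "<hex hash><space><name>\n" record per bookmark, the same shape as localtags. A minimal standalone sketch of that parse, using only the standard library and a hypothetical repository path (it mirrors, rather than reuses, the repo.opener-based code above):

import errno, os

def read_bookmarks(repopath):
    # Parse .hg/bookmarks into a {name: hexhash} dict; a missing file
    # simply means "no bookmarks", mirroring the ENOENT handling above.
    marks = {}
    try:
        fp = open(os.path.join(repopath, '.hg', 'bookmarks'))
    except IOError, inst:
        if inst.errno != errno.ENOENT:
            raise
        return marks
    for line in fp:
        line = line.strip()
        if not line or ' ' not in line:
            continue  # skip blank and malformed lines
        sha, name = line.split(' ', 1)
        marks[name] = sha
    fp.close()
    return marks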
46 46 def readcurrent(repo):
47 47 '''Get the current bookmark
48 48
49 49 If we use gittish branches we have a current bookmark that
50 50 we are on. This function returns the name of the bookmark. It
51 51 is stored in .hg/bookmarks.current
52 52 '''
53 53 mark = None
54 54 try:
55 55 file = repo.opener('bookmarks.current')
56 56 except IOError, inst:
57 57 if inst.errno != errno.ENOENT:
58 58 raise
59 59 return None
60 60 try:
61 61 # No readline() in posixfile_nt, reading everything is cheap
62 62 mark = encoding.tolocal((file.readlines() or [''])[0])
63 63 if mark == '' or mark not in repo._bookmarks:
64 64 mark = None
65 65 finally:
66 66 file.close()
67 67 return mark
68 68
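For the active bookmark the format is simpler still: .hg/bookmarks.current holds just the bare name, and an absent or empty file means no bookmark is current. A hedged pure-Python equivalent (the path argument is a stand-in, and known_marks plays the role of repo._bookmarks):

import errno, os

def read_current(repopath, known_marks):
    # Return the active bookmark name, or None if the file is missing,
    # empty, or names a bookmark that no longer exists.
    try:
        fp = open(os.path.join(repopath, '.hg', 'bookmarks.current'))
    except IOError, inst:
        if inst.errno != errno.ENOENT:
            raise
        return None
    try:
        mark = fp.read()
    finally:
        fp.close()
    if not mark or mark not in known_marks:
        return None
    return mark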
69 69 def write(repo):
70 70 '''Write bookmarks
71 71
72 72 Write the given bookmark => hash dictionary to the .hg/bookmarks file
73 73 in a format equal to that of localtags.
74 74
75 75 We also store a backup of the previous state in undo.bookmarks that
76 76 can be copied back on rollback.
77 77 '''
78 78 refs = repo._bookmarks
79 79
80 80 if repo._bookmarkcurrent not in refs:
81 81 setcurrent(repo, None)
82 82 for mark in refs.keys():
83 83 if not valid(mark):
84 84 raise util.Abort(_("bookmark '%s' contains illegal "
85 85 "character") % mark)
86 86
87 87 wlock = repo.wlock()
88 88 try:
89 89
90 90 file = repo.opener('bookmarks', 'w', atomictemp=True)
91 91 for refspec, node in refs.iteritems():
92 92 file.write("%s %s\n" % (hex(node), encoding.fromlocal(refspec)))
93 93 file.close()
94 94
95 95 # touch 00changelog.i so hgweb reloads bookmarks (no lock needed)
96 96 try:
97 97 os.utime(repo.sjoin('00changelog.i'), None)
98 98 except OSError:
99 99 pass
100 100
101 101 finally:
102 102 wlock.release()
103 103
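write() relies on the opener's atomictemp mode so a reader never observes a half-written bookmarks file. The same effect can be sketched with a plain temporary file and os.rename, which atomically replaces the target on POSIX (file layout hypothetical):

import os

def write_bookmarks(repopath, marks):
    # Write to a temporary file, then rename over the real one, so a
    # crash mid-write leaves the old bookmarks file intact.
    target = os.path.join(repopath, '.hg', 'bookmarks')
    tmp = target + '.tmp'
    fp = open(tmp, 'w')
    try:
        for name, hexnode in sorted(marks.iteritems()):
            fp.write('%s %s\n' % (hexnode, name))
    finally:
        fp.close()
    os.rename(tmp, target)  # atomic replace on POSIX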
104 104 def setcurrent(repo, mark):
105 105 '''Set the name of the bookmark that we are currently on
106 106
107 107 Set the name of the bookmark that we are on (hg update <bookmark>).
108 108 The name is recorded in .hg/bookmarks.current
109 109 '''
110 110 current = repo._bookmarkcurrent
111 111 if current == mark:
112 112 return
113 113
114 114 if mark not in repo._bookmarks:
115 115 mark = ''
116 116 if not valid(mark):
117 117 raise util.Abort(_("bookmark '%s' contains illegal "
118 118 "character") % mark)
119 119
120 120 wlock = repo.wlock()
121 121 try:
122 122 file = repo.opener('bookmarks.current', 'w', atomictemp=True)
123 123 file.write(encoding.fromlocal(mark))
124 124 file.close()
125 125 finally:
126 126 wlock.release()
127 127 repo._bookmarkcurrent = mark
128 128
129 129 def updatecurrentbookmark(repo, oldnode, curbranch):
130 130 try:
131 131 update(repo, oldnode, repo.branchtags()[curbranch])
132 132 except KeyError:
133 133 if curbranch == "default": # no default branch!
134 134 update(repo, oldnode, repo.lookup("tip"))
135 135 else:
136 136 raise util.Abort(_("branch %s not found") % curbranch)
137 137
138 138 def update(repo, parents, node):
139 139 marks = repo._bookmarks
140 140 update = False
141 141 mark = repo._bookmarkcurrent
142 142 if mark and marks[mark] in parents:
143 143 old = repo[marks[mark]]
144 144 new = repo[node]
145 145 if new in old.descendants():
146 146 marks[mark] = new.node()
147 147 update = True
148 148 if update:
149 write(repo)
149 repo._writebookmarks(marks)
150 150
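This hunk is the point of the commit: update() no longer calls the module-level write() directly but goes through repo._writebookmarks(), mirroring how reads already go through repo._bookmarks. The payoff is that repository subclasses gain a single override point for bookmark persistence. A toy sketch of the pattern (class names hypothetical; it assumes the module above is importable as bookmarks):

import bookmarks  # the module shown above

class baserepo(object):
    def _writebookmarks(self, marks):
        bookmarks.write(self)   # default: persist to .hg/bookmarks

class readonlyrepo(baserepo):
    def _writebookmarks(self, marks):
        pass                    # a statichttp-style repo can ignore writes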
151 151 def listbookmarks(repo):
152 152 # We may try to list bookmarks on a repo type that does not
153 153 # support it (e.g., statichttprepository).
154 154 marks = getattr(repo, '_bookmarks', {})
155 155
156 156 d = {}
157 157 for k, v in marks.iteritems():
158 158 d[k] = hex(v)
159 159 return d
160 160
161 161 def pushbookmark(repo, key, old, new):
162 162 w = repo.wlock()
163 163 try:
164 164 marks = repo._bookmarks
165 165 if hex(marks.get(key, '')) != old:
166 166 return False
167 167 if new == '':
168 168 del marks[key]
169 169 else:
170 170 if new not in repo:
171 171 return False
172 172 marks[key] = repo[new].node()
173 173 write(repo)
174 174 return True
175 175 finally:
176 176 w.release()
177 177
178 178 def updatefromremote(ui, repo, remote):
179 179 ui.debug("checking for updated bookmarks\n")
180 180 rb = remote.listkeys('bookmarks')
181 181 changed = False
182 182 for k in rb.keys():
183 183 if k in repo._bookmarks:
184 184 nr, nl = rb[k], repo._bookmarks[k]
185 185 if nr in repo:
186 186 cr = repo[nr]
187 187 cl = repo[nl]
188 188 if cl.rev() >= cr.rev():
189 189 continue
190 190 if cr in cl.descendants():
191 191 repo._bookmarks[k] = cr.node()
192 192 changed = True
193 193 ui.status(_("updating bookmark %s\n") % k)
194 194 else:
195 195 ui.warn(_("not updating divergent"
196 196 " bookmark %s\n") % k)
197 197 if changed:
198 198 write(repo)
199 199
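The fast-forward test above is an ancestry check: a remote bookmark is adopted only when it strictly descends from the local one; anything else is reported as divergent rather than silently moved. The rule in isolation, over a toy first-parent DAG (all names and data hypothetical):

parents = {'b': 'a', 'c': 'b', 'x': 'a'}  # child -> parent

def descends(node, ancestor):
    # Walk parent links back to the root.
    while node is not None:
        if node == ancestor:
            return True
        node = parents.get(node)
    return False

local, remote = 'b', 'c'
if descends(remote, local):
    print 'updating bookmark to', remote     # fast-forward: c descends from b
else:
    print 'not updating divergent bookmark'  # e.g. remote == 'x'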
200 200 def diff(ui, repo, remote):
201 201 ui.status(_("searching for changed bookmarks\n"))
202 202
203 203 lmarks = repo.listkeys('bookmarks')
204 204 rmarks = remote.listkeys('bookmarks')
205 205
206 206 diff = sorted(set(rmarks) - set(lmarks))
207 207 for k in diff:
208 208 ui.write(" %-25s %s\n" % (k, rmarks[k][:12]))
209 209
210 210 if len(diff) <= 0:
211 211 ui.status(_("no changed bookmarks found\n"))
212 212 return 1
213 213 return 0
@@ -1,2081 +1,2084 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import bin, hex, nullid, nullrev, short
9 9 from i18n import _
10 10 import repo, changegroup, subrepo, discovery, pushkey
11 11 import changelog, dirstate, filelog, manifest, context, bookmarks
12 12 import lock, transaction, store, encoding
13 13 import scmutil, util, extensions, hook, error, revset
14 14 import match as matchmod
15 15 import merge as mergemod
16 16 import tags as tagsmod
17 17 from lock import release
18 18 import weakref, errno, os, time, inspect
19 19 propertycache = util.propertycache
20 20 filecache = scmutil.filecache
21 21
22 22 class localrepository(repo.repository):
23 23 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
24 24 'known', 'getbundle'))
25 25 supportedformats = set(('revlogv1', 'generaldelta'))
26 26 supported = supportedformats | set(('store', 'fncache', 'shared',
27 27 'dotencode'))
28 28
29 29 def __init__(self, baseui, path=None, create=False):
30 30 repo.repository.__init__(self)
31 31 self.root = os.path.realpath(util.expandpath(path))
32 32 self.path = os.path.join(self.root, ".hg")
33 33 self.origroot = path
34 34 self.auditor = scmutil.pathauditor(self.root, self._checknested)
35 35 self.opener = scmutil.opener(self.path)
36 36 self.wopener = scmutil.opener(self.root)
37 37 self.baseui = baseui
38 38 self.ui = baseui.copy()
39 39
40 40 try:
41 41 self.ui.readconfig(self.join("hgrc"), self.root)
42 42 extensions.loadall(self.ui)
43 43 except IOError:
44 44 pass
45 45
46 46 if not os.path.isdir(self.path):
47 47 if create:
48 48 if not os.path.exists(path):
49 49 util.makedirs(path)
50 50 util.makedir(self.path, notindexed=True)
51 51 requirements = ["revlogv1"]
52 52 if self.ui.configbool('format', 'usestore', True):
53 53 os.mkdir(os.path.join(self.path, "store"))
54 54 requirements.append("store")
55 55 if self.ui.configbool('format', 'usefncache', True):
56 56 requirements.append("fncache")
57 57 if self.ui.configbool('format', 'dotencode', True):
58 58 requirements.append('dotencode')
59 59 # create an invalid changelog
60 60 self.opener.append(
61 61 "00changelog.i",
62 62 '\0\0\0\2' # represents revlogv2
63 63 ' dummy changelog to prevent using the old repo layout'
64 64 )
65 65 if self.ui.configbool('format', 'generaldelta', False):
66 66 requirements.append("generaldelta")
67 67 requirements = set(requirements)
68 68 else:
69 69 raise error.RepoError(_("repository %s not found") % path)
70 70 elif create:
71 71 raise error.RepoError(_("repository %s already exists") % path)
72 72 else:
73 73 try:
74 74 requirements = scmutil.readrequires(self.opener, self.supported)
75 75 except IOError, inst:
76 76 if inst.errno != errno.ENOENT:
77 77 raise
78 78 requirements = set()
79 79
80 80 self.sharedpath = self.path
81 81 try:
82 82 s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
83 83 if not os.path.exists(s):
84 84 raise error.RepoError(
85 85 _('.hg/sharedpath points to nonexistent directory %s') % s)
86 86 self.sharedpath = s
87 87 except IOError, inst:
88 88 if inst.errno != errno.ENOENT:
89 89 raise
90 90
91 91 self.store = store.store(requirements, self.sharedpath, scmutil.opener)
92 92 self.spath = self.store.path
93 93 self.sopener = self.store.opener
94 94 self.sjoin = self.store.join
95 95 self.opener.createmode = self.store.createmode
96 96 self._applyrequirements(requirements)
97 97 if create:
98 98 self._writerequirements()
99 99
100 100
101 101 self._branchcache = None
102 102 self._branchcachetip = None
103 103 self.filterpats = {}
104 104 self._datafilters = {}
105 105 self._transref = self._lockref = self._wlockref = None
106 106
107 107 # A cache for various files under .hg/ that tracks file changes,
108 108 # (used by the filecache decorator)
109 109 #
110 110 # Maps a property name to its util.filecacheentry
111 111 self._filecache = {}
112 112
113 113 def _applyrequirements(self, requirements):
114 114 self.requirements = requirements
115 115 openerreqs = set(('revlogv1', 'generaldelta'))
116 116 self.sopener.options = dict((r, 1) for r in requirements
117 117 if r in openerreqs)
118 118
119 119 def _writerequirements(self):
120 120 reqfile = self.opener("requires", "w")
121 121 for r in self.requirements:
122 122 reqfile.write("%s\n" % r)
123 123 reqfile.close()
124 124
125 125 def _checknested(self, path):
126 126 """Determine if path is a legal nested repository."""
127 127 if not path.startswith(self.root):
128 128 return False
129 129 subpath = path[len(self.root) + 1:]
130 130
131 131 # XXX: Checking against the current working copy is wrong in
132 132 # the sense that it can reject things like
133 133 #
134 134 # $ hg cat -r 10 sub/x.txt
135 135 #
136 136 # if sub/ is no longer a subrepository in the working copy
137 137 # parent revision.
138 138 #
139 139 # However, it can of course also allow things that would have
140 140 # been rejected before, such as the above cat command if sub/
141 141 # is a subrepository now, but was a normal directory before.
142 142 # The old path auditor would have rejected by mistake since it
143 143 # panics when it sees sub/.hg/.
144 144 #
145 145 # All in all, checking against the working copy seems sensible
146 146 # since we want to prevent access to nested repositories on
147 147 # the filesystem *now*.
148 148 ctx = self[None]
149 149 parts = util.splitpath(subpath)
150 150 while parts:
151 151 prefix = os.sep.join(parts)
152 152 if prefix in ctx.substate:
153 153 if prefix == subpath:
154 154 return True
155 155 else:
156 156 sub = ctx.sub(prefix)
157 157 return sub.checknested(subpath[len(prefix) + 1:])
158 158 else:
159 159 parts.pop()
160 160 return False
161 161
162 162 @filecache('bookmarks')
163 163 def _bookmarks(self):
164 164 return bookmarks.read(self)
165 165
166 166 @filecache('bookmarks.current')
167 167 def _bookmarkcurrent(self):
168 168 return bookmarks.readcurrent(self)
169 169
170 def _writebookmarks(self, marks):
171 bookmarks.write(self)
172
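These three members are the repo-side half of the delegation: @filecache keeps the parsed bookmarks in memory until the backing file changes on disk, and the new _writebookmarks gives subclasses one hook for persistence. A stripped-down model of a stat-based file cache, not Mercurial's actual implementation, just the idea:

import os

class filecached(object):
    # Toy stand-in for the filecache decorator: recompute a value only
    # when the backing file's stat data changes.
    def __init__(self, path, compute):
        self.path, self.compute = path, compute
        self.stamp = self.value = None

    def get(self):
        try:
            stamp = os.stat(self.path).st_mtime
        except OSError:
            stamp = None  # a missing file is also a cacheable state
        if self.value is None or stamp != self.stamp:
            self.value = self.compute()
            self.stamp = stamp
        return self.value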
170 173 @filecache('00changelog.i', True)
171 174 def changelog(self):
172 175 c = changelog.changelog(self.sopener)
173 176 if 'HG_PENDING' in os.environ:
174 177 p = os.environ['HG_PENDING']
175 178 if p.startswith(self.root):
176 179 c.readpending('00changelog.i.a')
177 180 return c
178 181
179 182 @filecache('00manifest.i', True)
180 183 def manifest(self):
181 184 return manifest.manifest(self.sopener)
182 185
183 186 @filecache('dirstate')
184 187 def dirstate(self):
185 188 warned = [0]
186 189 def validate(node):
187 190 try:
188 191 self.changelog.rev(node)
189 192 return node
190 193 except error.LookupError:
191 194 if not warned[0]:
192 195 warned[0] = True
193 196 self.ui.warn(_("warning: ignoring unknown"
194 197 " working parent %s!\n") % short(node))
195 198 return nullid
196 199
197 200 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
198 201
199 202 def __getitem__(self, changeid):
200 203 if changeid is None:
201 204 return context.workingctx(self)
202 205 return context.changectx(self, changeid)
203 206
204 207 def __contains__(self, changeid):
205 208 try:
206 209 return bool(self.lookup(changeid))
207 210 except error.RepoLookupError:
208 211 return False
209 212
210 213 def __nonzero__(self):
211 214 return True
212 215
213 216 def __len__(self):
214 217 return len(self.changelog)
215 218
216 219 def __iter__(self):
217 220 for i in xrange(len(self)):
218 221 yield i
219 222
220 223 def set(self, expr, *args):
221 224 '''
222 225 Yield a context for each matching revision, after doing arg
223 226 replacement via revset.formatspec
224 227 '''
225 228
226 229 expr = revset.formatspec(expr, *args)
227 230 m = revset.match(None, expr)
228 231 for r in m(self, range(len(self))):
229 232 yield self[r]
230 233
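set() is the internal query entry point: format the arguments into a revset expression, then yield one changectx per matching revision. A hedged usage example (the repository path is illustrative, and the APIs are as of this era of Mercurial):

from mercurial import ui, hg

repo = hg.repository(ui.ui(), '/path/to/repo')  # hypothetical path
# Walk the ancestors of tip; %s is filled in by revset.formatspec.
for ctx in repo.set('ancestors(%s)', 'tip'):
    print ctx.rev(), ctx.description().splitlines()[0]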
231 234 def url(self):
232 235 return 'file:' + self.root
233 236
234 237 def hook(self, name, throw=False, **args):
235 238 return hook.hook(self.ui, self, name, throw, **args)
236 239
237 240 tag_disallowed = ':\r\n'
238 241
239 242 def _tag(self, names, node, message, local, user, date, extra={}):
240 243 if isinstance(names, str):
241 244 allchars = names
242 245 names = (names,)
243 246 else:
244 247 allchars = ''.join(names)
245 248 for c in self.tag_disallowed:
246 249 if c in allchars:
247 250 raise util.Abort(_('%r cannot be used in a tag name') % c)
248 251
249 252 branches = self.branchmap()
250 253 for name in names:
251 254 self.hook('pretag', throw=True, node=hex(node), tag=name,
252 255 local=local)
253 256 if name in branches:
254 257 self.ui.warn(_("warning: tag %s conflicts with existing"
255 258 " branch name\n") % name)
256 259
257 260 def writetags(fp, names, munge, prevtags):
258 261 fp.seek(0, 2)
259 262 if prevtags and prevtags[-1] != '\n':
260 263 fp.write('\n')
261 264 for name in names:
262 265 m = munge and munge(name) or name
263 266 if self._tagscache.tagtypes and name in self._tagscache.tagtypes:
264 267 old = self.tags().get(name, nullid)
265 268 fp.write('%s %s\n' % (hex(old), m))
266 269 fp.write('%s %s\n' % (hex(node), m))
267 270 fp.close()
268 271
269 272 prevtags = ''
270 273 if local:
271 274 try:
272 275 fp = self.opener('localtags', 'r+')
273 276 except IOError:
274 277 fp = self.opener('localtags', 'a')
275 278 else:
276 279 prevtags = fp.read()
277 280
278 281 # local tags are stored in the current charset
279 282 writetags(fp, names, None, prevtags)
280 283 for name in names:
281 284 self.hook('tag', node=hex(node), tag=name, local=local)
282 285 return
283 286
284 287 try:
285 288 fp = self.wfile('.hgtags', 'rb+')
286 289 except IOError, e:
287 290 if e.errno != errno.ENOENT:
288 291 raise
289 292 fp = self.wfile('.hgtags', 'ab')
290 293 else:
291 294 prevtags = fp.read()
292 295
293 296 # committed tags are stored in UTF-8
294 297 writetags(fp, names, encoding.fromlocal, prevtags)
295 298
296 299 fp.close()
297 300
298 301 if '.hgtags' not in self.dirstate:
299 302 self[None].add(['.hgtags'])
300 303
301 304 m = matchmod.exact(self.root, '', ['.hgtags'])
302 305 tagnode = self.commit(message, user, date, extra=extra, match=m)
303 306
304 307 for name in names:
305 308 self.hook('tag', node=hex(node), tag=name, local=local)
306 309
307 310 return tagnode
308 311
309 312 def tag(self, names, node, message, local, user, date):
310 313 '''tag a revision with one or more symbolic names.
311 314
312 315 names is a list of strings or, when adding a single tag, names may be a
313 316 string.
314 317
315 318 if local is True, the tags are stored in a per-repository file.
316 319 otherwise, they are stored in the .hgtags file, and a new
317 320 changeset is committed with the change.
318 321
319 322 keyword arguments:
320 323
321 324 local: whether to store tags in non-version-controlled file
322 325 (default False)
323 326
324 327 message: commit message to use if committing
325 328
326 329 user: name of user to use if committing
327 330
328 331 date: date tuple to use if committing'''
329 332
330 333 if not local:
331 334 for x in self.status()[:5]:
332 335 if '.hgtags' in x:
333 336 raise util.Abort(_('working copy of .hgtags is changed '
334 337 '(please commit .hgtags manually)'))
335 338
336 339 self.tags() # instantiate the cache
337 340 self._tag(names, node, message, local, user, date)
338 341
339 342 @propertycache
340 343 def _tagscache(self):
341 344 '''Returns a tagscache object that contains various tag-related caches.'''
342 345
343 346 # This simplifies its cache management by having one decorated
344 347 # function (this one) and the rest simply fetch things from it.
345 348 class tagscache(object):
346 349 def __init__(self):
347 350 # These two define the set of tags for this repository. tags
348 351 # maps tag name to node; tagtypes maps tag name to 'global' or
349 352 # 'local'. (Global tags are defined by .hgtags across all
350 353 # heads, and local tags are defined in .hg/localtags.)
351 354 # They constitute the in-memory cache of tags.
352 355 self.tags = self.tagtypes = None
353 356
354 357 self.nodetagscache = self.tagslist = None
355 358
356 359 cache = tagscache()
357 360 cache.tags, cache.tagtypes = self._findtags()
358 361
359 362 return cache
360 363
361 364 def tags(self):
362 365 '''return a mapping of tag to node'''
363 366 return self._tagscache.tags
364 367
365 368 def _findtags(self):
366 369 '''Do the hard work of finding tags. Return a pair of dicts
367 370 (tags, tagtypes) where tags maps tag name to node, and tagtypes
368 371 maps tag name to a string like \'global\' or \'local\'.
369 372 Subclasses or extensions are free to add their own tags, but
370 373 should be aware that the returned dicts will be retained for the
371 374 duration of the localrepo object.'''
372 375
373 376 # XXX what tagtype should subclasses/extensions use? Currently
374 377 # mq and bookmarks add tags, but do not set the tagtype at all.
375 378 # Should each extension invent its own tag type? Should there
376 379 # be one tagtype for all such "virtual" tags? Or is the status
377 380 # quo fine?
378 381
379 382 alltags = {} # map tag name to (node, hist)
380 383 tagtypes = {}
381 384
382 385 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
383 386 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
384 387
385 388 # Build the return dicts. Have to re-encode tag names because
386 389 # the tags module always uses UTF-8 (in order not to lose info
387 390 # writing to the cache), but the rest of Mercurial wants them in
388 391 # local encoding.
389 392 tags = {}
390 393 for (name, (node, hist)) in alltags.iteritems():
391 394 if node != nullid:
392 395 try:
393 396 # ignore tags to unknown nodes
394 397 self.changelog.lookup(node)
395 398 tags[encoding.tolocal(name)] = node
396 399 except error.LookupError:
397 400 pass
398 401 tags['tip'] = self.changelog.tip()
399 402 tagtypes = dict([(encoding.tolocal(name), value)
400 403 for (name, value) in tagtypes.iteritems()])
401 404 return (tags, tagtypes)
402 405
403 406 def tagtype(self, tagname):
404 407 '''
405 408 return the type of the given tag. result can be:
406 409
407 410 'local' : a local tag
408 411 'global' : a global tag
409 412 None : tag does not exist
410 413 '''
411 414
412 415 return self._tagscache.tagtypes.get(tagname)
413 416
414 417 def tagslist(self):
415 418 '''return a list of tags ordered by revision'''
416 419 if not self._tagscache.tagslist:
417 420 l = []
418 421 for t, n in self.tags().iteritems():
419 422 r = self.changelog.rev(n)
420 423 l.append((r, t, n))
421 424 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
422 425
423 426 return self._tagscache.tagslist
424 427
425 428 def nodetags(self, node):
426 429 '''return the tags associated with a node'''
427 430 if not self._tagscache.nodetagscache:
428 431 nodetagscache = {}
429 432 for t, n in self.tags().iteritems():
430 433 nodetagscache.setdefault(n, []).append(t)
431 434 for tags in nodetagscache.itervalues():
432 435 tags.sort()
433 436 self._tagscache.nodetagscache = nodetagscache
434 437 return self._tagscache.nodetagscache.get(node, [])
435 438
436 439 def nodebookmarks(self, node):
437 440 marks = []
438 441 for bookmark, n in self._bookmarks.iteritems():
439 442 if n == node:
440 443 marks.append(bookmark)
441 444 return sorted(marks)
442 445
443 446 def _branchtags(self, partial, lrev):
444 447 # TODO: rename this function?
445 448 tiprev = len(self) - 1
446 449 if lrev != tiprev:
447 450 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
448 451 self._updatebranchcache(partial, ctxgen)
449 452 self._writebranchcache(partial, self.changelog.tip(), tiprev)
450 453
451 454 return partial
452 455
453 456 def updatebranchcache(self):
454 457 tip = self.changelog.tip()
455 458 if self._branchcache is not None and self._branchcachetip == tip:
456 459 return self._branchcache
457 460
458 461 oldtip = self._branchcachetip
459 462 self._branchcachetip = tip
460 463 if oldtip is None or oldtip not in self.changelog.nodemap:
461 464 partial, last, lrev = self._readbranchcache()
462 465 else:
463 466 lrev = self.changelog.rev(oldtip)
464 467 partial = self._branchcache
465 468
466 469 self._branchtags(partial, lrev)
467 470 # this private cache holds all heads (not just tips)
468 471 self._branchcache = partial
469 472
470 473 def branchmap(self):
471 474 '''returns a dictionary {branch: [branchheads]}'''
472 475 self.updatebranchcache()
473 476 return self._branchcache
474 477
475 478 def branchtags(self):
476 479 '''return a dict where branch names map to the tipmost head of
477 480 the branch, open heads come before closed'''
478 481 bt = {}
479 482 for bn, heads in self.branchmap().iteritems():
480 483 tip = heads[-1]
481 484 for h in reversed(heads):
482 485 if 'close' not in self.changelog.read(h)[5]:
483 486 tip = h
484 487 break
485 488 bt[bn] = tip
486 489 return bt
487 490
488 491 def _readbranchcache(self):
489 492 partial = {}
490 493 try:
491 494 f = self.opener("cache/branchheads")
492 495 lines = f.read().split('\n')
493 496 f.close()
494 497 except (IOError, OSError):
495 498 return {}, nullid, nullrev
496 499
497 500 try:
498 501 last, lrev = lines.pop(0).split(" ", 1)
499 502 last, lrev = bin(last), int(lrev)
500 503 if lrev >= len(self) or self[lrev].node() != last:
501 504 # invalidate the cache
502 505 raise ValueError('invalidating branch cache (tip differs)')
503 506 for l in lines:
504 507 if not l:
505 508 continue
506 509 node, label = l.split(" ", 1)
507 510 label = encoding.tolocal(label.strip())
508 511 partial.setdefault(label, []).append(bin(node))
509 512 except KeyboardInterrupt:
510 513 raise
511 514 except Exception, inst:
512 515 if self.ui.debugflag:
513 516 self.ui.warn(str(inst), '\n')
514 517 partial, last, lrev = {}, nullid, nullrev
515 518 return partial, last, lrev
516 519
517 520 def _writebranchcache(self, branches, tip, tiprev):
518 521 try:
519 522 f = self.opener("cache/branchheads", "w", atomictemp=True)
520 523 f.write("%s %s\n" % (hex(tip), tiprev))
521 524 for label, nodes in branches.iteritems():
522 525 for node in nodes:
523 526 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
524 527 f.close()
525 528 except (IOError, OSError):
526 529 pass
527 530
528 531 def _updatebranchcache(self, partial, ctxgen):
529 532 # collect new branch entries
530 533 newbranches = {}
531 534 for c in ctxgen:
532 535 newbranches.setdefault(c.branch(), []).append(c.node())
533 536 # if older branchheads are reachable from new ones, they aren't
534 537 # really branchheads. Note checking parents is insufficient:
535 538 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
536 539 for branch, newnodes in newbranches.iteritems():
537 540 bheads = partial.setdefault(branch, [])
538 541 bheads.extend(newnodes)
539 542 if len(bheads) <= 1:
540 543 continue
541 544 bheads = sorted(bheads, key=lambda x: self[x].rev())
542 545 # starting from tip means fewer passes over reachable
543 546 while newnodes:
544 547 latest = newnodes.pop()
545 548 if latest not in bheads:
546 549 continue
547 550 minbhrev = self[bheads[0]].node()
548 551 reachable = self.changelog.reachable(latest, minbhrev)
549 552 reachable.remove(latest)
550 553 if reachable:
551 554 bheads = [b for b in bheads if b not in reachable]
552 555 partial[branch] = bheads
553 556
554 557 def lookup(self, key):
555 558 if isinstance(key, int):
556 559 return self.changelog.node(key)
557 560 elif key == '.':
558 561 return self.dirstate.p1()
559 562 elif key == 'null':
560 563 return nullid
561 564 elif key == 'tip':
562 565 return self.changelog.tip()
563 566 n = self.changelog._match(key)
564 567 if n:
565 568 return n
566 569 if key in self._bookmarks:
567 570 return self._bookmarks[key]
568 571 if key in self.tags():
569 572 return self.tags()[key]
570 573 if key in self.branchtags():
571 574 return self.branchtags()[key]
572 575 n = self.changelog._partialmatch(key)
573 576 if n:
574 577 return n
575 578
576 579 # can't find key, check if it might have come from damaged dirstate
577 580 if key in self.dirstate.parents():
578 581 raise error.Abort(_("working directory has unknown parent '%s'!")
579 582 % short(key))
580 583 try:
581 584 if len(key) == 20:
582 585 key = hex(key)
583 586 except TypeError:
584 587 pass
585 588 raise error.RepoLookupError(_("unknown revision '%s'") % key)
586 589
587 590 def lookupbranch(self, key, remote=None):
588 591 repo = remote or self
589 592 if key in repo.branchmap():
590 593 return key
591 594
592 595 repo = (remote and remote.local()) and remote or self
593 596 return repo[key].branch()
594 597
595 598 def known(self, nodes):
596 599 nm = self.changelog.nodemap
597 600 return [(n in nm) for n in nodes]
598 601
599 602 def local(self):
600 603 return self
601 604
602 605 def join(self, f):
603 606 return os.path.join(self.path, f)
604 607
605 608 def wjoin(self, f):
606 609 return os.path.join(self.root, f)
607 610
608 611 def file(self, f):
609 612 if f[0] == '/':
610 613 f = f[1:]
611 614 return filelog.filelog(self.sopener, f)
612 615
613 616 def changectx(self, changeid):
614 617 return self[changeid]
615 618
616 619 def parents(self, changeid=None):
617 620 '''get list of changectxs for parents of changeid'''
618 621 return self[changeid].parents()
619 622
620 623 def filectx(self, path, changeid=None, fileid=None):
621 624 """changeid can be a changeset revision, node, or tag.
622 625 fileid can be a file revision or node."""
623 626 return context.filectx(self, path, changeid, fileid)
624 627
625 628 def getcwd(self):
626 629 return self.dirstate.getcwd()
627 630
628 631 def pathto(self, f, cwd=None):
629 632 return self.dirstate.pathto(f, cwd)
630 633
631 634 def wfile(self, f, mode='r'):
632 635 return self.wopener(f, mode)
633 636
634 637 def _link(self, f):
635 638 return os.path.islink(self.wjoin(f))
636 639
637 640 def _loadfilter(self, filter):
638 641 if filter not in self.filterpats:
639 642 l = []
640 643 for pat, cmd in self.ui.configitems(filter):
641 644 if cmd == '!':
642 645 continue
643 646 mf = matchmod.match(self.root, '', [pat])
644 647 fn = None
645 648 params = cmd
646 649 for name, filterfn in self._datafilters.iteritems():
647 650 if cmd.startswith(name):
648 651 fn = filterfn
649 652 params = cmd[len(name):].lstrip()
650 653 break
651 654 if not fn:
652 655 fn = lambda s, c, **kwargs: util.filter(s, c)
653 656 # Wrap old filters not supporting keyword arguments
654 657 if not inspect.getargspec(fn)[2]:
655 658 oldfn = fn
656 659 fn = lambda s, c, **kwargs: oldfn(s, c)
657 660 l.append((mf, fn, params))
658 661 self.filterpats[filter] = l
659 662 return self.filterpats[filter]
660 663
661 664 def _filter(self, filterpats, filename, data):
662 665 for mf, fn, cmd in filterpats:
663 666 if mf(filename):
664 667 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
665 668 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
666 669 break
667 670
668 671 return data
669 672
670 673 @propertycache
671 674 def _encodefilterpats(self):
672 675 return self._loadfilter('encode')
673 676
674 677 @propertycache
675 678 def _decodefilterpats(self):
676 679 return self._loadfilter('decode')
677 680
678 681 def adddatafilter(self, name, filter):
679 682 self._datafilters[name] = filter
680 683
681 684 def wread(self, filename):
682 685 if self._link(filename):
683 686 data = os.readlink(self.wjoin(filename))
684 687 else:
685 688 data = self.wopener.read(filename)
686 689 return self._filter(self._encodefilterpats, filename, data)
687 690
688 691 def wwrite(self, filename, data, flags):
689 692 data = self._filter(self._decodefilterpats, filename, data)
690 693 if 'l' in flags:
691 694 self.wopener.symlink(data, filename)
692 695 else:
693 696 self.wopener.write(filename, data)
694 697 if 'x' in flags:
695 698 util.setflags(self.wjoin(filename), False, True)
696 699
697 700 def wwritedata(self, filename, data):
698 701 return self._filter(self._decodefilterpats, filename, data)
699 702
700 703 def transaction(self, desc):
701 704 tr = self._transref and self._transref() or None
702 705 if tr and tr.running():
703 706 return tr.nest()
704 707
705 708 # abort here if the journal already exists
706 709 if os.path.exists(self.sjoin("journal")):
707 710 raise error.RepoError(
708 711 _("abandoned transaction found - run hg recover"))
709 712
710 713 journalfiles = self._writejournal(desc)
711 714 renames = [(x, undoname(x)) for x in journalfiles]
712 715
713 716 tr = transaction.transaction(self.ui.warn, self.sopener,
714 717 self.sjoin("journal"),
715 718 aftertrans(renames),
716 719 self.store.createmode)
717 720 self._transref = weakref.ref(tr)
718 721 return tr
719 722
720 723 def _writejournal(self, desc):
721 724 # save dirstate for rollback
722 725 try:
723 726 ds = self.opener.read("dirstate")
724 727 except IOError:
725 728 ds = ""
726 729 self.opener.write("journal.dirstate", ds)
727 730 self.opener.write("journal.branch",
728 731 encoding.fromlocal(self.dirstate.branch()))
729 732 self.opener.write("journal.desc",
730 733 "%d\n%s\n" % (len(self), desc))
731 734
732 735 bkname = self.join('bookmarks')
733 736 if os.path.exists(bkname):
734 737 util.copyfile(bkname, self.join('journal.bookmarks'))
735 738 else:
736 739 self.opener.write('journal.bookmarks', '')
737 740
738 741 return (self.sjoin('journal'), self.join('journal.dirstate'),
739 742 self.join('journal.branch'), self.join('journal.desc'),
740 743 self.join('journal.bookmarks'))
741 744
742 745 def recover(self):
743 746 lock = self.lock()
744 747 try:
745 748 if os.path.exists(self.sjoin("journal")):
746 749 self.ui.status(_("rolling back interrupted transaction\n"))
747 750 transaction.rollback(self.sopener, self.sjoin("journal"),
748 751 self.ui.warn)
749 752 self.invalidate()
750 753 return True
751 754 else:
752 755 self.ui.warn(_("no interrupted transaction available\n"))
753 756 return False
754 757 finally:
755 758 lock.release()
756 759
757 760 def rollback(self, dryrun=False, force=False):
758 761 wlock = lock = None
759 762 try:
760 763 wlock = self.wlock()
761 764 lock = self.lock()
762 765 if os.path.exists(self.sjoin("undo")):
763 766 return self._rollback(dryrun, force)
764 767 else:
765 768 self.ui.warn(_("no rollback information available\n"))
766 769 return 1
767 770 finally:
768 771 release(lock, wlock)
769 772
770 773 def _rollback(self, dryrun, force):
771 774 ui = self.ui
772 775 try:
773 776 args = self.opener.read('undo.desc').splitlines()
774 777 (oldlen, desc, detail) = (int(args[0]), args[1], None)
775 778 if len(args) >= 3:
776 779 detail = args[2]
777 780 oldtip = oldlen - 1
778 781
779 782 if detail and ui.verbose:
780 783 msg = (_('repository tip rolled back to revision %s'
781 784 ' (undo %s: %s)\n')
782 785 % (oldtip, desc, detail))
783 786 else:
784 787 msg = (_('repository tip rolled back to revision %s'
785 788 ' (undo %s)\n')
786 789 % (oldtip, desc))
787 790 except IOError:
788 791 msg = _('rolling back unknown transaction\n')
789 792 desc = None
790 793
791 794 if not force and self['.'] != self['tip'] and desc == 'commit':
792 795 raise util.Abort(
793 796 _('rollback of last commit while not checked out '
794 797 'may lose data'), hint=_('use -f to force'))
795 798
796 799 ui.status(msg)
797 800 if dryrun:
798 801 return 0
799 802
800 803 parents = self.dirstate.parents()
801 804 transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
802 805 if os.path.exists(self.join('undo.bookmarks')):
803 806 util.rename(self.join('undo.bookmarks'),
804 807 self.join('bookmarks'))
805 808 self.invalidate()
806 809
807 810 parentgone = (parents[0] not in self.changelog.nodemap or
808 811 parents[1] not in self.changelog.nodemap)
809 812 if parentgone:
810 813 util.rename(self.join('undo.dirstate'), self.join('dirstate'))
811 814 try:
812 815 branch = self.opener.read('undo.branch')
813 816 self.dirstate.setbranch(branch)
814 817 except IOError:
815 818 ui.warn(_('named branch could not be reset: '
816 819 'current branch is still \'%s\'\n')
817 820 % self.dirstate.branch())
818 821
819 822 self.dirstate.invalidate()
820 823 self.destroyed()
821 824 parents = tuple([p.rev() for p in self.parents()])
822 825 if len(parents) > 1:
823 826 ui.status(_('working directory now based on '
824 827 'revisions %d and %d\n') % parents)
825 828 else:
826 829 ui.status(_('working directory now based on '
827 830 'revision %d\n') % parents)
828 831 return 0
829 832
830 833 def invalidatecaches(self):
831 834 try:
832 835 delattr(self, '_tagscache')
833 836 except AttributeError:
834 837 pass
835 838
836 839 self._branchcache = None # in UTF-8
837 840 self._branchcachetip = None
838 841
839 842 def invalidatedirstate(self):
840 843 '''Invalidates the dirstate, causing the next call to dirstate
841 844 to check if it was modified since the last time it was read,
842 845 rereading it if it has.
843 846
844 847 This is different from dirstate.invalidate() in that it doesn't
845 848 always reread the dirstate. Use dirstate.invalidate() if you want to
846 849 explicitly read the dirstate again (i.e. restoring it to a previous
847 850 known good state).'''
848 851 try:
849 852 delattr(self, 'dirstate')
850 853 except AttributeError:
851 854 pass
852 855
853 856 def invalidate(self):
854 857 for k in self._filecache:
855 858 # dirstate is invalidated separately in invalidatedirstate()
856 859 if k == 'dirstate':
857 860 continue
858 861
859 862 try:
860 863 delattr(self, k)
861 864 except AttributeError:
862 865 pass
863 866 self.invalidatecaches()
864 867
865 868 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
866 869 try:
867 870 l = lock.lock(lockname, 0, releasefn, desc=desc)
868 871 except error.LockHeld, inst:
869 872 if not wait:
870 873 raise
871 874 self.ui.warn(_("waiting for lock on %s held by %r\n") %
872 875 (desc, inst.locker))
873 876 # default to 600 seconds timeout
874 877 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
875 878 releasefn, desc=desc)
876 879 if acquirefn:
877 880 acquirefn()
878 881 return l
879 882
880 883 def lock(self, wait=True):
881 884 '''Lock the repository store (.hg/store) and return a weak reference
882 885 to the lock. Use this before modifying the store (e.g. committing or
883 886 stripping). If you are opening a transaction, get a lock as well.'''
884 887 l = self._lockref and self._lockref()
885 888 if l is not None and l.held:
886 889 l.lock()
887 890 return l
888 891
889 892 def unlock():
890 893 self.store.write()
891 894 for k, ce in self._filecache.items():
892 895 if k == 'dirstate':
893 896 continue
894 897 ce.refresh()
895 898
896 899 l = self._lock(self.sjoin("lock"), wait, unlock,
897 900 self.invalidate, _('repository %s') % self.origroot)
898 901 self._lockref = weakref.ref(l)
899 902 return l
900 903
901 904 def wlock(self, wait=True):
902 905 '''Lock the non-store parts of the repository (everything under
903 906 .hg except .hg/store) and return a weak reference to the lock.
904 907 Use this before modifying files in .hg.'''
905 908 l = self._wlockref and self._wlockref()
906 909 if l is not None and l.held:
907 910 l.lock()
908 911 return l
909 912
910 913 def unlock():
911 914 self.dirstate.write()
912 915 ce = self._filecache.get('dirstate')
913 916 if ce:
914 917 ce.refresh()
915 918
916 919 l = self._lock(self.join("wlock"), wait, unlock,
917 920 self.invalidatedirstate, _('working directory of %s') %
918 921 self.origroot)
919 922 self._wlockref = weakref.ref(l)
920 923 return l
921 924
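Callers that touch both the store and the working directory take wlock() before lock(), exactly as rollback() does above, and release in reverse order. A sketch of that discipline (repo obtained as in the earlier example):

from mercurial.lock import release

wlock = lock = None
try:
    wlock = repo.wlock()  # guards .hg outside the store
    lock = repo.lock()    # guards .hg/store
    # ... modify store and working state here ...
finally:
    release(lock, wlock)  # release() skips slots that are still None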
922 925 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
923 926 """
924 927 commit an individual file as part of a larger transaction
925 928 """
926 929
927 930 fname = fctx.path()
928 931 text = fctx.data()
929 932 flog = self.file(fname)
930 933 fparent1 = manifest1.get(fname, nullid)
931 934 fparent2 = fparent2o = manifest2.get(fname, nullid)
932 935
933 936 meta = {}
934 937 copy = fctx.renamed()
935 938 if copy and copy[0] != fname:
936 939 # Mark the new revision of this file as a copy of another
937 940 # file. This copy data will effectively act as a parent
938 941 # of this new revision. If this is a merge, the first
939 942 # parent will be the nullid (meaning "look up the copy data")
940 943 # and the second one will be the other parent. For example:
941 944 #
942 945 # 0 --- 1 --- 3 rev1 changes file foo
943 946 # \ / rev2 renames foo to bar and changes it
944 947 # \- 2 -/ rev3 should have bar with all changes and
945 948 # should record that bar descends from
946 949 # bar in rev2 and foo in rev1
947 950 #
948 951 # this allows this merge to succeed:
949 952 #
950 953 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
951 954 # \ / merging rev3 and rev4 should use bar@rev2
952 955 # \- 2 --- 4 as the merge base
953 956 #
954 957
955 958 cfname = copy[0]
956 959 crev = manifest1.get(cfname)
957 960 newfparent = fparent2
958 961
959 962 if manifest2: # branch merge
960 963 if fparent2 == nullid or crev is None: # copied on remote side
961 964 if cfname in manifest2:
962 965 crev = manifest2[cfname]
963 966 newfparent = fparent1
964 967
965 968 # find source in nearest ancestor if we've lost track
966 969 if not crev:
967 970 self.ui.debug(" %s: searching for copy revision for %s\n" %
968 971 (fname, cfname))
969 972 for ancestor in self[None].ancestors():
970 973 if cfname in ancestor:
971 974 crev = ancestor[cfname].filenode()
972 975 break
973 976
974 977 if crev:
975 978 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
976 979 meta["copy"] = cfname
977 980 meta["copyrev"] = hex(crev)
978 981 fparent1, fparent2 = nullid, newfparent
979 982 else:
980 983 self.ui.warn(_("warning: can't find ancestor for '%s' "
981 984 "copied from '%s'!\n") % (fname, cfname))
982 985
983 986 elif fparent2 != nullid:
984 987 # is one parent an ancestor of the other?
985 988 fparentancestor = flog.ancestor(fparent1, fparent2)
986 989 if fparentancestor == fparent1:
987 990 fparent1, fparent2 = fparent2, nullid
988 991 elif fparentancestor == fparent2:
989 992 fparent2 = nullid
990 993
991 994 # is the file changed?
992 995 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
993 996 changelist.append(fname)
994 997 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
995 998
996 999 # are just the flags changed during merge?
997 1000 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
998 1001 changelist.append(fname)
999 1002
1000 1003 return fparent1
1001 1004
1002 1005 def commit(self, text="", user=None, date=None, match=None, force=False,
1003 1006 editor=False, extra={}):
1004 1007 """Add a new revision to current repository.
1005 1008
1006 1009 Revision information is gathered from the working directory,
1007 1010 match can be used to filter the committed files. If editor is
1008 1011 supplied, it is called to get a commit message.
1009 1012 """
1010 1013
1011 1014 def fail(f, msg):
1012 1015 raise util.Abort('%s: %s' % (f, msg))
1013 1016
1014 1017 if not match:
1015 1018 match = matchmod.always(self.root, '')
1016 1019
1017 1020 if not force:
1018 1021 vdirs = []
1019 1022 match.dir = vdirs.append
1020 1023 match.bad = fail
1021 1024
1022 1025 wlock = self.wlock()
1023 1026 try:
1024 1027 wctx = self[None]
1025 1028 merge = len(wctx.parents()) > 1
1026 1029
1027 1030 if (not force and merge and match and
1028 1031 (match.files() or match.anypats())):
1029 1032 raise util.Abort(_('cannot partially commit a merge '
1030 1033 '(do not specify files or patterns)'))
1031 1034
1032 1035 changes = self.status(match=match, clean=force)
1033 1036 if force:
1034 1037 changes[0].extend(changes[6]) # mq may commit unchanged files
1035 1038
1036 1039 # check subrepos
1037 1040 subs = []
1038 1041 removedsubs = set()
1039 1042 if '.hgsub' in wctx:
1040 1043 # only manage subrepos and .hgsubstate if .hgsub is present
1041 1044 for p in wctx.parents():
1042 1045 removedsubs.update(s for s in p.substate if match(s))
1043 1046 for s in wctx.substate:
1044 1047 removedsubs.discard(s)
1045 1048 if match(s) and wctx.sub(s).dirty():
1046 1049 subs.append(s)
1047 1050 if (subs or removedsubs):
1048 1051 if (not match('.hgsub') and
1049 1052 '.hgsub' in (wctx.modified() + wctx.added())):
1050 1053 raise util.Abort(
1051 1054 _("can't commit subrepos without .hgsub"))
1052 1055 if '.hgsubstate' not in changes[0]:
1053 1056 changes[0].insert(0, '.hgsubstate')
1054 1057 if '.hgsubstate' in changes[2]:
1055 1058 changes[2].remove('.hgsubstate')
1056 1059 elif '.hgsub' in changes[2]:
1057 1060 # clean up .hgsubstate when .hgsub is removed
1058 1061 if ('.hgsubstate' in wctx and
1059 1062 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1060 1063 changes[2].insert(0, '.hgsubstate')
1061 1064
1062 1065 if subs and not self.ui.configbool('ui', 'commitsubrepos', True):
1063 1066 changedsubs = [s for s in subs if wctx.sub(s).dirty(True)]
1064 1067 if changedsubs:
1065 1068 raise util.Abort(_("uncommitted changes in subrepo %s")
1066 1069 % changedsubs[0])
1067 1070
1068 1071 # make sure all explicit patterns are matched
1069 1072 if not force and match.files():
1070 1073 matched = set(changes[0] + changes[1] + changes[2])
1071 1074
1072 1075 for f in match.files():
1073 1076 if f == '.' or f in matched or f in wctx.substate:
1074 1077 continue
1075 1078 if f in changes[3]: # missing
1076 1079 fail(f, _('file not found!'))
1077 1080 if f in vdirs: # visited directory
1078 1081 d = f + '/'
1079 1082 for mf in matched:
1080 1083 if mf.startswith(d):
1081 1084 break
1082 1085 else:
1083 1086 fail(f, _("no match under directory!"))
1084 1087 elif f not in self.dirstate:
1085 1088 fail(f, _("file not tracked!"))
1086 1089
1087 1090 if (not force and not extra.get("close") and not merge
1088 1091 and not (changes[0] or changes[1] or changes[2])
1089 1092 and wctx.branch() == wctx.p1().branch()):
1090 1093 return None
1091 1094
1092 1095 ms = mergemod.mergestate(self)
1093 1096 for f in changes[0]:
1094 1097 if f in ms and ms[f] == 'u':
1095 1098 raise util.Abort(_("unresolved merge conflicts "
1096 1099 "(see hg help resolve)"))
1097 1100
1098 1101 cctx = context.workingctx(self, text, user, date, extra, changes)
1099 1102 if editor:
1100 1103 cctx._text = editor(self, cctx, subs)
1101 1104 edited = (text != cctx._text)
1102 1105
1103 1106 # commit subs
1104 1107 if subs or removedsubs:
1105 1108 state = wctx.substate.copy()
1106 1109 for s in sorted(subs):
1107 1110 sub = wctx.sub(s)
1108 1111 self.ui.status(_('committing subrepository %s\n') %
1109 1112 subrepo.subrelpath(sub))
1110 1113 sr = sub.commit(cctx._text, user, date)
1111 1114 state[s] = (state[s][0], sr)
1112 1115 subrepo.writestate(self, state)
1113 1116
1114 1117 # Save commit message in case this transaction gets rolled back
1115 1118 # (e.g. by a pretxncommit hook). Leave the content alone on
1116 1119 # the assumption that the user will use the same editor again.
1117 1120 msgfn = self.savecommitmessage(cctx._text)
1118 1121
1119 1122 p1, p2 = self.dirstate.parents()
1120 1123 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1121 1124 try:
1122 1125 self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
1123 1126 ret = self.commitctx(cctx, True)
1124 1127 except:
1125 1128 if edited:
1126 1129 self.ui.write(
1127 1130 _('note: commit message saved in %s\n') % msgfn)
1128 1131 raise
1129 1132
1130 1133 # update bookmarks, dirstate and mergestate
1131 1134 bookmarks.update(self, p1, ret)
1132 1135 for f in changes[0] + changes[1]:
1133 1136 self.dirstate.normal(f)
1134 1137 for f in changes[2]:
1135 1138 self.dirstate.drop(f)
1136 1139 self.dirstate.setparents(ret)
1137 1140 ms.reset()
1138 1141 finally:
1139 1142 wlock.release()
1140 1143
1141 1144 self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
1142 1145 return ret
1143 1146
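Note that commit() now also advances the active bookmark via bookmarks.update(self, p1, ret), so a plain commit keeps the checked-out bookmark on the new head. Minimal driver code for the API (path and file name hypothetical; keyword arguments per the signature above):

from mercurial import ui, hg
from mercurial import match as matchmod

u = ui.ui()
repo = hg.repository(u, '/path/to/repo')      # hypothetical path
# Restrict the commit to one file, as _tag() does for .hgtags.
m = matchmod.exact(repo.root, '', ['a.txt'])
node = repo.commit('edit a.txt', user='someone@example.com', match=m)
print node and node.encode('hex')             # None means nothing to commit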
1144 1147 def commitctx(self, ctx, error=False):
1145 1148 """Add a new revision to current repository.
1146 1149 Revision information is passed via the context argument.
1147 1150 """
1148 1151
1149 1152 tr = lock = None
1150 1153 removed = list(ctx.removed())
1151 1154 p1, p2 = ctx.p1(), ctx.p2()
1152 1155 user = ctx.user()
1153 1156
1154 1157 lock = self.lock()
1155 1158 try:
1156 1159 tr = self.transaction("commit")
1157 1160 trp = weakref.proxy(tr)
1158 1161
1159 1162 if ctx.files():
1160 1163 m1 = p1.manifest().copy()
1161 1164 m2 = p2.manifest()
1162 1165
1163 1166 # check in files
1164 1167 new = {}
1165 1168 changed = []
1166 1169 linkrev = len(self)
1167 1170 for f in sorted(ctx.modified() + ctx.added()):
1168 1171 self.ui.note(f + "\n")
1169 1172 try:
1170 1173 fctx = ctx[f]
1171 1174 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1172 1175 changed)
1173 1176 m1.set(f, fctx.flags())
1174 1177 except OSError, inst:
1175 1178 self.ui.warn(_("trouble committing %s!\n") % f)
1176 1179 raise
1177 1180 except IOError, inst:
1178 1181 errcode = getattr(inst, 'errno', errno.ENOENT)
1179 1182 if error or errcode and errcode != errno.ENOENT:
1180 1183 self.ui.warn(_("trouble committing %s!\n") % f)
1181 1184 raise
1182 1185 else:
1183 1186 removed.append(f)
1184 1187
1185 1188 # update manifest
1186 1189 m1.update(new)
1187 1190 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1188 1191 drop = [f for f in removed if f in m1]
1189 1192 for f in drop:
1190 1193 del m1[f]
1191 1194 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1192 1195 p2.manifestnode(), (new, drop))
1193 1196 files = changed + removed
1194 1197 else:
1195 1198 mn = p1.manifestnode()
1196 1199 files = []
1197 1200
1198 1201 # update changelog
1199 1202 self.changelog.delayupdate()
1200 1203 n = self.changelog.add(mn, files, ctx.description(),
1201 1204 trp, p1.node(), p2.node(),
1202 1205 user, ctx.date(), ctx.extra().copy())
1203 1206 p = lambda: self.changelog.writepending() and self.root or ""
1204 1207 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1205 1208 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1206 1209 parent2=xp2, pending=p)
1207 1210 self.changelog.finalize(trp)
1208 1211 tr.close()
1209 1212
1210 1213 if self._branchcache:
1211 1214 self.updatebranchcache()
1212 1215 return n
1213 1216 finally:
1214 1217 if tr:
1215 1218 tr.release()
1216 1219 lock.release()
1217 1220
1218 1221 def destroyed(self):
1219 1222 '''Inform the repository that nodes have been destroyed.
1220 1223 Intended for use by strip and rollback, so there's a common
1221 1224 place for anything that has to be done after destroying history.'''
1222 1225 # XXX it might be nice if we could take the list of destroyed
1223 1226 # nodes, but I don't see an easy way for rollback() to do that
1224 1227
1225 1228 # Ensure the persistent tag cache is updated. Doing it now
1226 1229 # means that the tag cache only has to worry about destroyed
1227 1230 # heads immediately after a strip/rollback. That in turn
1228 1231 # guarantees that "cachetip == currenttip" (comparing both rev
1229 1232 # and node) always means no nodes have been added or destroyed.
1230 1233
1231 1234 # XXX this is suboptimal when qrefresh'ing: we strip the current
1232 1235 # head, refresh the tag cache, then immediately add a new head.
1233 1236 # But I think doing it this way is necessary for the "instant
1234 1237 # tag cache retrieval" case to work.
1235 1238 self.invalidatecaches()
1236 1239
1237 1240 def walk(self, match, node=None):
1238 1241 '''
1239 1242 walk recursively through the directory tree or a given
1240 1243 changeset, finding all files matched by the match
1241 1244 function
1242 1245 '''
1243 1246 return self[node].walk(match)
1244 1247
1245 1248 def status(self, node1='.', node2=None, match=None,
1246 1249 ignored=False, clean=False, unknown=False,
1247 1250 listsubrepos=False):
1248 1251 """return status of files between two nodes or node and working directory
1249 1252
1250 1253 If node1 is None, use the first dirstate parent instead.
1251 1254 If node2 is None, compare node1 with working directory.
1252 1255 """
1253 1256
1254 1257 def mfmatches(ctx):
1255 1258 mf = ctx.manifest().copy()
1256 1259 for fn in mf.keys():
1257 1260 if not match(fn):
1258 1261 del mf[fn]
1259 1262 return mf
1260 1263
1261 1264 if isinstance(node1, context.changectx):
1262 1265 ctx1 = node1
1263 1266 else:
1264 1267 ctx1 = self[node1]
1265 1268 if isinstance(node2, context.changectx):
1266 1269 ctx2 = node2
1267 1270 else:
1268 1271 ctx2 = self[node2]
1269 1272
1270 1273 working = ctx2.rev() is None
1271 1274 parentworking = working and ctx1 == self['.']
1272 1275 match = match or matchmod.always(self.root, self.getcwd())
1273 1276 listignored, listclean, listunknown = ignored, clean, unknown
1274 1277
1275 1278 # load earliest manifest first for caching reasons
1276 1279 if not working and ctx2.rev() < ctx1.rev():
1277 1280 ctx2.manifest()
1278 1281
1279 1282 if not parentworking:
1280 1283 def bad(f, msg):
1281 1284 if f not in ctx1:
1282 1285 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1283 1286 match.bad = bad
1284 1287
1285 1288 if working: # we need to scan the working dir
1286 1289 subrepos = []
1287 1290 if '.hgsub' in self.dirstate:
1288 1291 subrepos = ctx2.substate.keys()
1289 1292 s = self.dirstate.status(match, subrepos, listignored,
1290 1293 listclean, listunknown)
1291 1294 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1292 1295
1293 1296 # check for any possibly clean files
1294 1297 if parentworking and cmp:
1295 1298 fixup = []
1296 1299 # do a full compare of any files that might have changed
1297 1300 for f in sorted(cmp):
1298 1301 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1299 1302 or ctx1[f].cmp(ctx2[f])):
1300 1303 modified.append(f)
1301 1304 else:
1302 1305 fixup.append(f)
1303 1306
1304 1307 # update dirstate for files that are actually clean
1305 1308 if fixup:
1306 1309 if listclean:
1307 1310 clean += fixup
1308 1311
1309 1312 try:
1310 1313 # updating the dirstate is optional
1311 1314 # so we don't wait on the lock
1312 1315 wlock = self.wlock(False)
1313 1316 try:
1314 1317 for f in fixup:
1315 1318 self.dirstate.normal(f)
1316 1319 finally:
1317 1320 wlock.release()
1318 1321 except error.LockError:
1319 1322 pass
1320 1323
1321 1324 if not parentworking:
1322 1325 mf1 = mfmatches(ctx1)
1323 1326 if working:
1324 1327 # we are comparing working dir against non-parent
1325 1328 # generate a pseudo-manifest for the working dir
1326 1329 mf2 = mfmatches(self['.'])
1327 1330 for f in cmp + modified + added:
1328 1331 mf2[f] = None
1329 1332 mf2.set(f, ctx2.flags(f))
1330 1333 for f in removed:
1331 1334 if f in mf2:
1332 1335 del mf2[f]
1333 1336 else:
1334 1337 # we are comparing two revisions
1335 1338 deleted, unknown, ignored = [], [], []
1336 1339 mf2 = mfmatches(ctx2)
1337 1340
1338 1341 modified, added, clean = [], [], []
1339 1342 for fn in mf2:
1340 1343 if fn in mf1:
1341 1344 if (fn not in deleted and
1342 1345 (mf1.flags(fn) != mf2.flags(fn) or
1343 1346 (mf1[fn] != mf2[fn] and
1344 1347 (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
1345 1348 modified.append(fn)
1346 1349 elif listclean:
1347 1350 clean.append(fn)
1348 1351 del mf1[fn]
1349 1352 elif fn not in deleted:
1350 1353 added.append(fn)
1351 1354 removed = mf1.keys()
1352 1355
1353 1356 r = modified, added, removed, deleted, unknown, ignored, clean
1354 1357
1355 1358 if listsubrepos:
1356 1359 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1357 1360 if working:
1358 1361 rev2 = None
1359 1362 else:
1360 1363 rev2 = ctx2.substate[subpath][1]
1361 1364 try:
1362 1365 submatch = matchmod.narrowmatcher(subpath, match)
1363 1366 s = sub.status(rev2, match=submatch, ignored=listignored,
1364 1367 clean=listclean, unknown=listunknown,
1365 1368 listsubrepos=True)
1366 1369 for rfiles, sfiles in zip(r, s):
1367 1370 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1368 1371 except error.LookupError:
1369 1372 self.ui.status(_("skipping missing subrepository: %s\n")
1370 1373 % subpath)
1371 1374
1372 1375 for l in r:
1373 1376 l.sort()
1374 1377 return r
1375 1378
1376 1379 def heads(self, start=None):
1377 1380 heads = self.changelog.heads(start)
1378 1381 # sort the output in rev descending order
1379 1382 return sorted(heads, key=self.changelog.rev, reverse=True)
1380 1383
1381 1384 def branchheads(self, branch=None, start=None, closed=False):
1382 1385 '''return a (possibly filtered) list of heads for the given branch
1383 1386
1384 1387 Heads are returned in topological order, from newest to oldest.
1385 1388 If branch is None, use the dirstate branch.
1386 1389 If start is not None, return only heads reachable from start.
1387 1390 If closed is True, return heads that are marked as closed as well.
1388 1391 '''
1389 1392 if branch is None:
1390 1393 branch = self[None].branch()
1391 1394 branches = self.branchmap()
1392 1395 if branch not in branches:
1393 1396 return []
1394 1397 # the cache returns heads ordered lowest to highest
1395 1398 bheads = list(reversed(branches[branch]))
1396 1399 if start is not None:
1397 1400 # filter out the heads that cannot be reached from startrev
1398 1401 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1399 1402 bheads = [h for h in bheads if h in fbheads]
1400 1403 if not closed:
1401 1404 bheads = [h for h in bheads if
1402 1405 ('close' not in self.changelog.read(h)[5])]
1403 1406 return bheads
1404 1407
1405 1408 def branches(self, nodes):
1406 1409 if not nodes:
1407 1410 nodes = [self.changelog.tip()]
1408 1411 b = []
1409 1412 for n in nodes:
1410 1413 t = n
1411 1414 while True:
1412 1415 p = self.changelog.parents(n)
1413 1416 if p[1] != nullid or p[0] == nullid:
1414 1417 b.append((t, n, p[0], p[1]))
1415 1418 break
1416 1419 n = p[0]
1417 1420 return b
1418 1421
1419 1422 def between(self, pairs):
1420 1423 r = []
1421 1424
1422 1425 for top, bottom in pairs:
1423 1426 n, l, i = top, [], 0
1424 1427 f = 1
1425 1428
1426 1429 while n != bottom and n != nullid:
1427 1430 p = self.changelog.parents(n)[0]
1428 1431 if i == f:
1429 1432 l.append(n)
1430 1433 f = f * 2
1431 1434 n = p
1432 1435 i += 1
1433 1436
1434 1437 r.append(l)
1435 1438
1436 1439 return r
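# between() serves the legacy (pre-getbundle) discovery protocol: walking
# first parents from top towards bottom, it records only the nodes whose
# distance from top is a power of two (i == 1, 2, 4, 8, ...), so each list
# is logarithmic in the distance and the client can bisect towards the
# common point. A hedged sketch on a linear history, assuming `tip` and
# `root` are changelog nodes:
#
#   samples = repo.between([(tip, root)])[0]
#   # samples holds the nodes 1, 2, 4, 8, ... steps below tip, not every
#   # intermediate changeset.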
1437 1440
1438 1441 def pull(self, remote, heads=None, force=False):
1439 1442 lock = self.lock()
1440 1443 try:
1441 1444 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1442 1445 force=force)
1443 1446 common, fetch, rheads = tmp
1444 1447 if not fetch:
1445 1448 self.ui.status(_("no changes found\n"))
1446 1449 result = 0
1447 1450 else:
1448 1451 if heads is None and list(common) == [nullid]:
1449 1452 self.ui.status(_("requesting all changes\n"))
1450 1453 elif heads is None and remote.capable('changegroupsubset'):
1451 1454 # issue1320, avoid a race if remote changed after discovery
1452 1455 heads = rheads
1453 1456
1454 1457 if remote.capable('getbundle'):
1455 1458 cg = remote.getbundle('pull', common=common,
1456 1459 heads=heads or rheads)
1457 1460 elif heads is None:
1458 1461 cg = remote.changegroup(fetch, 'pull')
1459 1462 elif not remote.capable('changegroupsubset'):
1460 1463 raise util.Abort(_("partial pull cannot be done because "
1461 1464 "other repository doesn't support "
1462 1465 "changegroupsubset."))
1463 1466 else:
1464 1467 cg = remote.changegroupsubset(fetch, heads, 'pull')
1465 1468 result = self.addchangegroup(cg, 'pull', remote.url(),
1466 1469 lock=lock)
1467 1470 finally:
1468 1471 lock.release()
1469 1472
1470 1473 return result
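# A hedged sketch of driving pull() directly; hg.peer() and the URL are
# assumptions about this era of the API, not part of this change:
#
#   from mercurial import hg, ui as uimod
#   myui = uimod.ui()
#   repo = hg.repository(myui, '.')
#   remote = hg.peer(myui, {}, 'http://example.com/hg/project')
#   if repo.pull(remote) == 0:
#       myui.status("nothing new\n")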
1471 1474
1472 1475 def checkpush(self, force, revs):
1473 1476 """Extensions can override this function if additional checks have
1474 1477 to be performed before pushing, or call it if they override push
1475 1478 command.
1476 1479 """
1477 1480 pass
1478 1481
1479 1482 def push(self, remote, force=False, revs=None, newbranch=False):
1480 1483 '''Push outgoing changesets (limited by revs) from the current
1481 1484 repository to remote. Return an integer:
1482 1485 - 0 means HTTP error *or* nothing to push
1483 1486 - 1 means we pushed and remote head count is unchanged *or*
1484 1487 we have outgoing changesets but refused to push
1485 1488 - other values as described by addchangegroup()
1486 1489 '''
1487 1490 # there are two ways to push to remote repo:
1488 1491 #
1489 1492 # addchangegroup assumes local user can lock remote
1490 1493 # repo (local filesystem, old ssh servers).
1491 1494 #
1492 1495 # unbundle assumes local user cannot lock remote repo (new ssh
1493 1496 # servers, http servers).
1494 1497
1495 1498 self.checkpush(force, revs)
1496 1499 lock = None
1497 1500 unbundle = remote.capable('unbundle')
1498 1501 if not unbundle:
1499 1502 lock = remote.lock()
1500 1503 try:
1501 1504 cg, remote_heads = discovery.prepush(self, remote, force, revs,
1502 1505 newbranch)
1503 1506 ret = remote_heads
1504 1507 if cg is not None:
1505 1508 if unbundle:
1506 1509 # the local repo finds heads on the server, then figures out
1507 1510 # which revs it must push. once the revs are transferred, if the
1508 1511 # server finds it has different heads (someone else won the
1509 1512 # commit/push race), the server aborts.
1510 1513 if force:
1511 1514 remote_heads = ['force']
1512 1515 # ssh: return remote's addchangegroup()
1513 1516 # http: return remote's addchangegroup() or 0 for error
1514 1517 ret = remote.unbundle(cg, remote_heads, 'push')
1515 1518 else:
1516 1519 # we return an integer indicating remote head count change
1517 1520 ret = remote.addchangegroup(cg, 'push', self.url(),
1518 1521 lock=lock)
1519 1522 finally:
1520 1523 if lock is not None:
1521 1524 lock.release()
1522 1525
1523 1526 self.ui.debug("checking for updated bookmarks\n")
1524 1527 rb = remote.listkeys('bookmarks')
1525 1528 for k in rb.keys():
1526 1529 if k in self._bookmarks:
1527 1530 nr, nl = rb[k], hex(self._bookmarks[k])
1528 1531 if nr in self:
1529 1532 cr = self[nr]
1530 1533 cl = self[nl]
1531 1534 if cl in cr.descendants():
1532 1535 r = remote.pushkey('bookmarks', k, nr, nl)
1533 1536 if r:
1534 1537 self.ui.status(_("updating bookmark %s\n") % k)
1535 1538 else:
1536 1539 self.ui.warn(_('updating bookmark %s'
1537 1540 ' failed!\n') % k)
1538 1541
1539 1542 return ret
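# A hedged sketch of interpreting the return value documented above:
#
#   ret = repo.push(remote)
#   if ret == 0:
#       repo.ui.warn("push failed or nothing to push\n")
#   elif ret == 1:
#       repo.ui.status("pushed, remote head count unchanged\n")
#   elif ret > 1:
#       repo.ui.status("pushed, %d new remote heads\n" % (ret - 1))
#   else:
#       repo.ui.status("pushed, %d remote heads removed\n" % (-ret - 1))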
1540 1543
1541 1544 def changegroupinfo(self, nodes, source):
1542 1545 if self.ui.verbose or source == 'bundle':
1543 1546 self.ui.status(_("%d changesets found\n") % len(nodes))
1544 1547 if self.ui.debugflag:
1545 1548 self.ui.debug("list of changesets:\n")
1546 1549 for node in nodes:
1547 1550 self.ui.debug("%s\n" % hex(node))
1548 1551
1549 1552 def changegroupsubset(self, bases, heads, source):
1550 1553 """Compute a changegroup consisting of all the nodes that are
1551 1554 descendants of any of the bases and ancestors of any of the heads.
1552 1555 Return a chunkbuffer object whose read() method will return
1553 1556 successive changegroup chunks.
1554 1557
1555 1558 It is fairly complex as determining which filenodes and which
1556 1559 manifest nodes need to be included for the changeset to be complete
1557 1560 is non-trivial.
1558 1561
1559 1562 Another wrinkle is doing the reverse, figuring out which changeset in
1560 1563 the changegroup a particular filenode or manifestnode belongs to.
1561 1564 """
1562 1565 cl = self.changelog
1563 1566 if not bases:
1564 1567 bases = [nullid]
1565 1568 csets, bases, heads = cl.nodesbetween(bases, heads)
1566 1569 # We assume that all ancestors of bases are known
1567 1570 common = set(cl.ancestors(*[cl.rev(n) for n in bases]))
1568 1571 return self._changegroupsubset(common, csets, heads, source)
1569 1572
1570 1573 def getbundle(self, source, heads=None, common=None):
1571 1574 """Like changegroupsubset, but returns the set difference between the
1572 1575 ancestors of heads and the ancestors of common.
1573 1576
1574 1577 If heads is None, use the local heads. If common is None, use [nullid].
1575 1578
1576 1579 The nodes in common might not all be known locally due to the way the
1577 1580 current discovery protocol works.
1578 1581 """
1579 1582 cl = self.changelog
1580 1583 if common:
1581 1584 nm = cl.nodemap
1582 1585 common = [n for n in common if n in nm]
1583 1586 else:
1584 1587 common = [nullid]
1585 1588 if not heads:
1586 1589 heads = cl.heads()
1587 1590 common, missing = cl.findcommonmissing(common, heads)
1588 1591 if not missing:
1589 1592 return None
1590 1593 return self._changegroupsubset(common, missing, heads, source)
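# A hedged illustration of the contract: the result covers ancestors of
# `heads` that are not ancestors of `common`, so a full pull is simply
# heads=None, common=None (everything above nullid), and None comes back
# when nothing is missing:
#
#   cg = repo.getbundle('pull', heads=None, common=None)
#   if cg is None:
#       repo.ui.status("no changes found\n")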
1591 1594
1592 1595 def _changegroupsubset(self, commonrevs, csets, heads, source):
1593 1596
1594 1597 cl = self.changelog
1595 1598 mf = self.manifest
1596 1599 mfs = {} # needed manifests
1597 1600 fnodes = {} # needed file nodes
1598 1601 changedfiles = set()
1599 1602 fstate = ['', {}]
1600 1603 count = [0]
1601 1604
1602 1605 # can we go through the fast path?
1603 1606 heads.sort()
1604 1607 if heads == sorted(self.heads()):
1605 1608 return self._changegroup(csets, source)
1606 1609
1607 1610 # slow path
1608 1611 self.hook('preoutgoing', throw=True, source=source)
1609 1612 self.changegroupinfo(csets, source)
1610 1613
1611 1614 # filter any nodes that claim to be part of the known set
1612 1615 def prune(revlog, missing):
1613 1616 return [n for n in missing
1614 1617 if revlog.linkrev(revlog.rev(n)) not in commonrevs]
1615 1618
1616 1619 def lookup(revlog, x):
1617 1620 if revlog == cl:
1618 1621 c = cl.read(x)
1619 1622 changedfiles.update(c[3])
1620 1623 mfs.setdefault(c[0], x)
1621 1624 count[0] += 1
1622 1625 self.ui.progress(_('bundling'), count[0],
1623 1626 unit=_('changesets'), total=len(csets))
1624 1627 return x
1625 1628 elif revlog == mf:
1626 1629 clnode = mfs[x]
1627 1630 mdata = mf.readfast(x)
1628 1631 for f in changedfiles:
1629 1632 if f in mdata:
1630 1633 fnodes.setdefault(f, {}).setdefault(mdata[f], clnode)
1631 1634 count[0] += 1
1632 1635 self.ui.progress(_('bundling'), count[0],
1633 1636 unit=_('manifests'), total=len(mfs))
1634 1637 return mfs[x]
1635 1638 else:
1636 1639 self.ui.progress(
1637 1640 _('bundling'), count[0], item=fstate[0],
1638 1641 unit=_('files'), total=len(changedfiles))
1639 1642 return fstate[1][x]
1640 1643
1641 1644 bundler = changegroup.bundle10(lookup)
1642 1645 reorder = self.ui.config('bundle', 'reorder', 'auto')
1643 1646 if reorder == 'auto':
1644 1647 reorder = None
1645 1648 else:
1646 1649 reorder = util.parsebool(reorder)
1647 1650
1648 1651 def gengroup():
1649 1652 # Create a changenode group generator that will call our functions
1650 1653 # back to look up the owning changenode and collect information.
1651 1654 for chunk in cl.group(csets, bundler, reorder=reorder):
1652 1655 yield chunk
1653 1656 self.ui.progress(_('bundling'), None)
1654 1657
1655 1658 # Create a generator for the manifestnodes that calls our lookup
1656 1659 # and data collection functions back.
1657 1660 count[0] = 0
1658 1661 for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
1659 1662 yield chunk
1660 1663 self.ui.progress(_('bundling'), None)
1661 1664
1662 1665 mfs.clear()
1663 1666
1664 1667 # Go through all our files in order sorted by name.
1665 1668 count[0] = 0
1666 1669 for fname in sorted(changedfiles):
1667 1670 filerevlog = self.file(fname)
1668 1671 if not len(filerevlog):
1669 1672 raise util.Abort(_("empty or missing revlog for %s") % fname)
1670 1673 fstate[0] = fname
1671 1674 fstate[1] = fnodes.pop(fname, {})
1672 1675
1673 1676 nodelist = prune(filerevlog, fstate[1])
1674 1677 if nodelist:
1675 1678 count[0] += 1
1676 1679 yield bundler.fileheader(fname)
1677 1680 for chunk in filerevlog.group(nodelist, bundler, reorder):
1678 1681 yield chunk
1679 1682
1680 1683 # Signal that no more groups are left.
1681 1684 yield bundler.close()
1682 1685 self.ui.progress(_('bundling'), None)
1683 1686
1684 1687 if csets:
1685 1688 self.hook('outgoing', node=hex(csets[0]), source=source)
1686 1689
1687 1690 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
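# The generator above emits three sections -- changelog, manifests, then
# one group per changed file -- and the shared lookup() callback keys off
# which revlog calls it, both to drive progress output and to fill the
# mfs/fnodes side tables consumed by the later sections. A hedged sketch
# of draining the resulting chunkbuffer, with argument values assumed to
# be prepared as in the callers above:
#
#   cg = repo._changegroupsubset(commonrevs, csets, heads, 'bundle')
#   while True:
#       data = cg.read(65536)
#       if not data:
#           break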
1688 1691
1689 1692 def changegroup(self, basenodes, source):
1690 1693 # to avoid a race we use changegroupsubset() (issue1320)
1691 1694 return self.changegroupsubset(basenodes, self.heads(), source)
1692 1695
1693 1696 def _changegroup(self, nodes, source):
1694 1697 """Compute the changegroup of all nodes that we have that a recipient
1695 1698 doesn't. Return a chunkbuffer object whose read() method will return
1696 1699 successive changegroup chunks.
1697 1700
1698 1701 This is much easier than the previous function as we can assume that
1699 1702 the recipient has any changenode we aren't sending them.
1700 1703
1701 1704 nodes is the set of nodes to send"""
1702 1705
1703 1706 cl = self.changelog
1704 1707 mf = self.manifest
1705 1708 mfs = {}
1706 1709 changedfiles = set()
1707 1710 fstate = ['']
1708 1711 count = [0]
1709 1712
1710 1713 self.hook('preoutgoing', throw=True, source=source)
1711 1714 self.changegroupinfo(nodes, source)
1712 1715
1713 1716 revset = set([cl.rev(n) for n in nodes])
1714 1717
1715 1718 def gennodelst(log):
1716 1719 return [log.node(r) for r in log if log.linkrev(r) in revset]
1717 1720
1718 1721 def lookup(revlog, x):
1719 1722 if revlog == cl:
1720 1723 c = cl.read(x)
1721 1724 changedfiles.update(c[3])
1722 1725 mfs.setdefault(c[0], x)
1723 1726 count[0] += 1
1724 1727 self.ui.progress(_('bundling'), count[0],
1725 1728 unit=_('changesets'), total=len(nodes))
1726 1729 return x
1727 1730 elif revlog == mf:
1728 1731 count[0] += 1
1729 1732 self.ui.progress(_('bundling'), count[0],
1730 1733 unit=_('manifests'), total=len(mfs))
1731 1734 return cl.node(revlog.linkrev(revlog.rev(x)))
1732 1735 else:
1733 1736 self.ui.progress(
1734 1737 _('bundling'), count[0], item=fstate[0],
1735 1738 total=len(changedfiles), unit=_('files'))
1736 1739 return cl.node(revlog.linkrev(revlog.rev(x)))
1737 1740
1738 1741 bundler = changegroup.bundle10(lookup)
1739 1742 reorder = self.ui.config('bundle', 'reorder', 'auto')
1740 1743 if reorder == 'auto':
1741 1744 reorder = None
1742 1745 else:
1743 1746 reorder = util.parsebool(reorder)
1744 1747
1745 1748 def gengroup():
1746 1749 '''yield a sequence of changegroup chunks (strings)'''
1747 1750 # the changelog pass also fills in changedfiles via the lookup callback
1748 1751
1749 1752 for chunk in cl.group(nodes, bundler, reorder=reorder):
1750 1753 yield chunk
1751 1754 self.ui.progress(_('bundling'), None)
1752 1755
1753 1756 count[0] = 0
1754 1757 for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
1755 1758 yield chunk
1756 1759 self.ui.progress(_('bundling'), None)
1757 1760
1758 1761 count[0] = 0
1759 1762 for fname in sorted(changedfiles):
1760 1763 filerevlog = self.file(fname)
1761 1764 if not len(filerevlog):
1762 1765 raise util.Abort(_("empty or missing revlog for %s") % fname)
1763 1766 fstate[0] = fname
1764 1767 nodelist = gennodelst(filerevlog)
1765 1768 if nodelist:
1766 1769 count[0] += 1
1767 1770 yield bundler.fileheader(fname)
1768 1771 for chunk in filerevlog.group(nodelist, bundler, reorder):
1769 1772 yield chunk
1770 1773 yield bundler.close()
1771 1774 self.ui.progress(_('bundling'), None)
1772 1775
1773 1776 if nodes:
1774 1777 self.hook('outgoing', node=hex(nodes[0]), source=source)
1775 1778
1776 1779 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1777 1780
1778 1781 def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
1779 1782 """Add the changegroup returned by source.read() to this repo.
1780 1783 srctype is a string like 'push', 'pull', or 'unbundle'. url is
1781 1784 the URL of the repo where this changegroup is coming from.
1782 1785 If lock is not None, the function takes ownership of the lock
1783 1786 and releases it after the changegroup is added.
1784 1787
1785 1788 Return an integer summarizing the change to this repo:
1786 1789 - nothing changed or no source: 0
1787 1790 - more heads than before: 1+added heads (2..n)
1788 1791 - fewer heads than before: -1-removed heads (-2..-n)
1789 1792 - number of heads stays the same: 1
1790 1793 """
1791 1794 def csmap(x):
1792 1795 self.ui.debug("add changeset %s\n" % short(x))
1793 1796 return len(cl)
1794 1797
1795 1798 def revmap(x):
1796 1799 return cl.rev(x)
1797 1800
1798 1801 if not source:
1799 1802 return 0
1800 1803
1801 1804 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1802 1805
1803 1806 changesets = files = revisions = 0
1804 1807 efiles = set()
1805 1808
1806 1809 # write changelog data to temp files so concurrent readers will not see
1807 1810 # an inconsistent view
1808 1811 cl = self.changelog
1809 1812 cl.delayupdate()
1810 1813 oldheads = cl.heads()
1811 1814
1812 1815 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
1813 1816 try:
1814 1817 trp = weakref.proxy(tr)
1815 1818 # pull off the changeset group
1816 1819 self.ui.status(_("adding changesets\n"))
1817 1820 clstart = len(cl)
1818 1821 class prog(object):
1819 1822 step = _('changesets')
1820 1823 count = 1
1821 1824 ui = self.ui
1822 1825 total = None
1823 1826 def __call__(self):
1824 1827 self.ui.progress(self.step, self.count, unit=_('chunks'),
1825 1828 total=self.total)
1826 1829 self.count += 1
1827 1830 pr = prog()
1828 1831 source.callback = pr
1829 1832
1830 1833 source.changelogheader()
1831 1834 if (cl.addgroup(source, csmap, trp) is None
1832 1835 and not emptyok):
1833 1836 raise util.Abort(_("received changelog group is empty"))
1834 1837 clend = len(cl)
1835 1838 changesets = clend - clstart
1836 1839 for c in xrange(clstart, clend):
1837 1840 efiles.update(self[c].files())
1838 1841 efiles = len(efiles)
1839 1842 self.ui.progress(_('changesets'), None)
1840 1843
1841 1844 # pull off the manifest group
1842 1845 self.ui.status(_("adding manifests\n"))
1843 1846 pr.step = _('manifests')
1844 1847 pr.count = 1
1845 1848 pr.total = changesets # manifests <= changesets
1846 1849 # no need to check for an empty manifest group here:
1847 1850 # if the merges in changesets 3 and 4 produce the same manifest
1848 1851 # as the merge of 1 and 2 already did, no new manifest node is
1849 1852 # created and the manifest group is legitimately empty during the pull
1850 1853 source.manifestheader()
1851 1854 self.manifest.addgroup(source, revmap, trp)
1852 1855 self.ui.progress(_('manifests'), None)
1853 1856
1854 1857 needfiles = {}
1855 1858 if self.ui.configbool('server', 'validate', default=False):
1856 1859 # validate incoming csets have their manifests
1857 1860 for cset in xrange(clstart, clend):
1858 1861 mfest = self.changelog.read(self.changelog.node(cset))[0]
1859 1862 mfest = self.manifest.readdelta(mfest)
1860 1863 # store file nodes we must see
1861 1864 for f, n in mfest.iteritems():
1862 1865 needfiles.setdefault(f, set()).add(n)
1863 1866
1864 1867 # process the files
1865 1868 self.ui.status(_("adding file changes\n"))
1866 1869 pr.step = _('files')
1867 1870 pr.count = 1
1868 1871 pr.total = efiles
1869 1872 source.callback = None
1870 1873
1871 1874 while True:
1872 1875 chunkdata = source.filelogheader()
1873 1876 if not chunkdata:
1874 1877 break
1875 1878 f = chunkdata["filename"]
1876 1879 self.ui.debug("adding %s revisions\n" % f)
1877 1880 pr()
1878 1881 fl = self.file(f)
1879 1882 o = len(fl)
1880 1883 if fl.addgroup(source, revmap, trp) is None:
1881 1884 raise util.Abort(_("received file revlog group is empty"))
1882 1885 revisions += len(fl) - o
1883 1886 files += 1
1884 1887 if f in needfiles:
1885 1888 needs = needfiles[f]
1886 1889 for new in xrange(o, len(fl)):
1887 1890 n = fl.node(new)
1888 1891 if n in needs:
1889 1892 needs.remove(n)
1890 1893 if not needs:
1891 1894 del needfiles[f]
1892 1895 self.ui.progress(_('files'), None)
1893 1896
1894 1897 for f, needs in needfiles.iteritems():
1895 1898 fl = self.file(f)
1896 1899 for n in needs:
1897 1900 try:
1898 1901 fl.rev(n)
1899 1902 except error.LookupError:
1900 1903 raise util.Abort(
1901 1904 _('missing file data for %s:%s - run hg verify') %
1902 1905 (f, hex(n)))
1903 1906
1904 1907 dh = 0
1905 1908 if oldheads:
1906 1909 heads = cl.heads()
1907 1910 dh = len(heads) - len(oldheads)
1908 1911 for h in heads:
1909 1912 if h not in oldheads and 'close' in self[h].extra():
1910 1913 dh -= 1
1911 1914 htext = ""
1912 1915 if dh:
1913 1916 htext = _(" (%+d heads)") % dh
1914 1917
1915 1918 self.ui.status(_("added %d changesets"
1916 1919 " with %d changes to %d files%s\n")
1917 1920 % (changesets, revisions, files, htext))
1918 1921
1919 1922 if changesets > 0:
1920 1923 p = lambda: cl.writepending() and self.root or ""
1921 1924 self.hook('pretxnchangegroup', throw=True,
1922 1925 node=hex(cl.node(clstart)), source=srctype,
1923 1926 url=url, pending=p)
1924 1927
1925 1928 # make changelog see real files again
1926 1929 cl.finalize(trp)
1927 1930
1928 1931 tr.close()
1929 1932 finally:
1930 1933 tr.release()
1931 1934 if lock:
1932 1935 lock.release()
1933 1936
1934 1937 if changesets > 0:
1935 1938 # forcefully update the on-disk branch cache
1936 1939 self.ui.debug("updating the branch cache\n")
1937 1940 self.updatebranchcache()
1938 1941 self.hook("changegroup", node=hex(cl.node(clstart)),
1939 1942 source=srctype, url=url)
1940 1943
1941 1944 for i in xrange(clstart, clend):
1942 1945 self.hook("incoming", node=hex(cl.node(i)),
1943 1946 source=srctype, url=url)
1944 1947
1945 1948 # never return 0 here:
1946 1949 if dh < 0:
1947 1950 return dh - 1
1948 1951 else:
1949 1952 return dh + 1
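# A hedged worked example of the return convention above: 2 open heads
# before and 4 after gives dh == 2 and a return of 3; 3 heads shrinking
# to 2 gives dh == -1 and a return of -2; an unchanged count returns 1,
# leaving 0 to mean "nothing changed or no source".
#
#   ret = repo.addchangegroup(cg, 'pull', remote.url())
#   if ret > 1:
#       repo.ui.status("+%d heads\n" % (ret - 1))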
1950 1953
1951 1954 def stream_in(self, remote, requirements):
1952 1955 lock = self.lock()
1953 1956 try:
1954 1957 fp = remote.stream_out()
1955 1958 l = fp.readline()
1956 1959 try:
1957 1960 resp = int(l)
1958 1961 except ValueError:
1959 1962 raise error.ResponseError(
1960 1963 _('Unexpected response from remote server:'), l)
1961 1964 if resp == 1:
1962 1965 raise util.Abort(_('operation forbidden by server'))
1963 1966 elif resp == 2:
1964 1967 raise util.Abort(_('locking the remote repository failed'))
1965 1968 elif resp != 0:
1966 1969 raise util.Abort(_('the server sent an unknown error code'))
1967 1970 self.ui.status(_('streaming all changes\n'))
1968 1971 l = fp.readline()
1969 1972 try:
1970 1973 total_files, total_bytes = map(int, l.split(' ', 1))
1971 1974 except (ValueError, TypeError):
1972 1975 raise error.ResponseError(
1973 1976 _('Unexpected response from remote server:'), l)
1974 1977 self.ui.status(_('%d files to transfer, %s of data\n') %
1975 1978 (total_files, util.bytecount(total_bytes)))
1976 1979 start = time.time()
1977 1980 for i in xrange(total_files):
1978 1981 # XXX doesn't support '\n' or '\r' in filenames
1979 1982 l = fp.readline()
1980 1983 try:
1981 1984 name, size = l.split('\0', 1)
1982 1985 size = int(size)
1983 1986 except (ValueError, TypeError):
1984 1987 raise error.ResponseError(
1985 1988 _('Unexpected response from remote server:'), l)
1986 1989 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1987 1990 # for backwards compat, name was partially encoded
1988 1991 ofp = self.sopener(store.decodedir(name), 'w')
1989 1992 for chunk in util.filechunkiter(fp, limit=size):
1990 1993 ofp.write(chunk)
1991 1994 ofp.close()
1992 1995 elapsed = time.time() - start
1993 1996 if elapsed <= 0:
1994 1997 elapsed = 0.001
1995 1998 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1996 1999 (util.bytecount(total_bytes), elapsed,
1997 2000 util.bytecount(total_bytes / elapsed)))
1998 2001
1999 2002 # new requirements = old non-format requirements + new format-related
2000 2003 # requirements from the streamed-in repository
2001 2004 requirements.update(set(self.requirements) - self.supportedformats)
2002 2005 self._applyrequirements(requirements)
2003 2006 self._writerequirements()
2004 2007
2005 2008 self.invalidate()
2006 2009 return len(self.heads()) + 1
2007 2010 finally:
2008 2011 lock.release()
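# A hedged sketch of the stream_out wire format parsed above; the sizes
# and file name are illustrative, and every field is ASCII text:
#
#   0\n                       status: 0 ok, 1 forbidden, 2 remote lock failed
#   2 65536\n                 total_files SP total_bytes
#   data/foo.i\x0032768\n     per file: store path, NUL, size in bytes,
#   <32768 raw bytes>         followed by exactly that many bytes of data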
2009 2012
2010 2013 def clone(self, remote, heads=[], stream=False):
2011 2014 '''clone remote repository.
2012 2015
2013 2016 keyword arguments:
2014 2017 heads: list of revs to clone (forces use of pull)
2015 2018 stream: use streaming clone if possible'''
2016 2019
2017 2020 # now, all clients that can request uncompressed clones can
2018 2021 # read repo formats supported by all servers that can serve
2019 2022 # them.
2020 2023
2021 2024 # if revlog format changes, client will have to check version
2022 2025 # and format flags on "stream" capability, and use
2023 2026 # uncompressed only if compatible.
2024 2027
2025 2028 if stream and not heads:
2026 2029 # 'stream' means remote revlog format is revlogv1 only
2027 2030 if remote.capable('stream'):
2028 2031 return self.stream_in(remote, set(('revlogv1',)))
2029 2032 # otherwise, 'streamreqs' contains the remote revlog format
2030 2033 streamreqs = remote.capable('streamreqs')
2031 2034 if streamreqs:
2032 2035 streamreqs = set(streamreqs.split(','))
2033 2036 # if we support it, stream in and adjust our requirements
2034 2037 if not streamreqs - self.supportedformats:
2035 2038 return self.stream_in(remote, streamreqs)
2036 2039 return self.pull(remote, heads)
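# A hedged sketch of the capability check this negotiation performs; the
# 'generaldelta' value is illustrative:
#
#   streamreqs = remote.capable('streamreqs')  # e.g. "generaldelta,revlogv1"
#   if streamreqs:
#       reqs = set(streamreqs.split(','))
#       ok = not (reqs - repo.supportedformats)  # stream only if all known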
2037 2040
2038 2041 def pushkey(self, namespace, key, old, new):
2039 2042 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
2040 2043 old=old, new=new)
2041 2044 ret = pushkey.push(self, namespace, key, old, new)
2042 2045 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2043 2046 ret=ret)
2044 2047 return ret
2045 2048
2046 2049 def listkeys(self, namespace):
2047 2050 self.hook('prelistkeys', throw=True, namespace=namespace)
2048 2051 values = pushkey.list(self, namespace)
2049 2052 self.hook('listkeys', namespace=namespace, values=values)
2050 2053 return values
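# A hedged usage sketch; 'bookmarks' is the pushkey namespace that push()
# above relies on, and the output formatting is illustrative:
#
#   marks = repo.listkeys('bookmarks')          # {name: hex nodeid}
#   for name, hexnode in sorted(marks.iteritems()):
#       repo.ui.write("%s %s\n" % (name, hexnode))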
2051 2054
2052 2055 def debugwireargs(self, one, two, three=None, four=None, five=None):
2053 2056 '''used to test argument passing over the wire'''
2054 2057 return "%s %s %s %s %s" % (one, two, three, four, five)
2055 2058
2056 2059 def savecommitmessage(self, text):
2057 2060 fp = self.opener('last-message.txt', 'wb')
2058 2061 try:
2059 2062 fp.write(text)
2060 2063 finally:
2061 2064 fp.close()
2062 2065 return self.pathto(fp.name[len(self.root)+1:])
2063 2066
2064 2067 # used to avoid circular references so destructors work
2065 2068 def aftertrans(files):
2066 2069 renamefiles = [tuple(t) for t in files]
2067 2070 def a():
2068 2071 for src, dest in renamefiles:
2069 2072 util.rename(src, dest)
2070 2073 return a
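# aftertrans() deliberately closes over a plain list and returns a plain
# function, so a transaction can hold it without a reference cycle back to
# the repo. A hedged sketch of the rename it performs when invoked:
#
#   a = aftertrans([('journal', 'undo')])
#   a()   # util.rename('journal', 'undo')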
2071 2074
2072 2075 def undoname(fn):
2073 2076 base, name = os.path.split(fn)
2074 2077 assert name.startswith('journal')
2075 2078 return os.path.join(base, name.replace('journal', 'undo', 1))
2076 2079
2077 2080 def instance(ui, path, create):
2078 2081 return localrepository(ui, util.urllocalpath(path), create)
2079 2082
2080 2083 def islocal(path):
2081 2084 return True