tags: drop nested function
Matt Mackall
r8854:980f5b7c default
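Summary: the nested tagctxs() helper inside localrepository.tags() is dropped and its body inlined; the .hgtags file contexts are collected in a plain ctxs list and the read loop iterates reversed(ctxs) directly. The change is marked with -/+ in the hunk below.
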
@@ -1,2178 +1,2176 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2, incorporated herein by reference.

from node import bin, hex, nullid, nullrev, short
from i18n import _
import repo, changegroup, subrepo
import changelog, dirstate, filelog, manifest, context
import lock, transaction, store, encoding
import util, extensions, hook, error
import match as match_
import merge as merge_
from lock import release
import weakref, stat, errno, os, time, inspect
propertycache = util.propertycache

class localrepository(repo.repository):
    capabilities = set(('lookup', 'changegroupsubset', 'branchmap'))
    supported = set('revlogv1 store fncache shared'.split())

    def __init__(self, baseui, path=None, create=0):
        repo.repository.__init__(self)
        self.root = os.path.realpath(path)
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.opener = util.opener(self.path)
        self.wopener = util.opener(self.root)
        self.baseui = baseui
        self.ui = baseui.copy()

        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    os.mkdir(path)
                os.mkdir(self.path)
                requirements = ["revlogv1"]
                if self.ui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                    # create an invalid changelog
                    self.opener("00changelog.i", "a").write(
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                reqfile = self.opener("requires", "w")
                for r in requirements:
                    reqfile.write("%s\n" % r)
                reqfile.close()
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            # find requirements
            requirements = set()
            try:
                requirements = set(self.opener("requires").read().splitlines())
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
            for r in requirements - self.supported:
                raise error.RepoError(_("requirement '%s' not supported") % r)

        self.sharedpath = self.path
        try:
            s = os.path.realpath(self.opener("sharedpath").read())
            if not os.path.exists(s):
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s' % s))
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, util.opener)
        self.spath = self.store.path
        self.sopener = self.store.opener
        self.sjoin = self.store.join
        self.opener.createmode = self.store.createmode

        self.tagscache = None
        self._tagstypecache = None
        self.branchcache = None
        self._ubranchcache = None # UTF-8 version of branchcache
        self._branchcachetip = None
        self.nodetagscache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

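    # Usage sketch (illustrative, not part of this changeset): this
    # constructor is normally reached through mercurial.hg.repository(),
    # with create=1 laying down a fresh .hg with the requirements above.
    #
    #     from mercurial import ui, hg
    #     repo = hg.repository(ui.ui(), '/path/to/existing/repo')
    #     newrepo = hg.repository(ui.ui(), '/tmp/scratch', create=1)
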
    @propertycache
    def changelog(self):
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        self.sopener.defversion = c.version
        return c

    @propertycache
    def manifest(self):
        return manifest.manifest(self.sopener)

    @propertycache
    def dirstate(self):
        return dirstate.dirstate(self.opener, self.ui, self.root)

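    # Note (sketch): util.propertycache fills the attribute into the
    # instance __dict__ on first access, so later reads skip the
    # descriptor; that is why invalidate() below can force a reload by
    # simply delattr()ing 'changelog' and 'manifest'.
    #
    #     repo.changelog is repo.changelog   # True once cached
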
    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        for i in xrange(len(self)):
            yield i

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        return hook.hook(self.ui, self, name, throw, **args)

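    # Usage sketch (illustrative): the methods above give a repo the
    # container protocol; `repo` is assumed to be an open repository.
    #
    #     ctx = repo['tip']           # changectx for a rev/tag/node
    #     wctx = repo[None]           # working directory context
    #     for rev in repo:            # iterates 0 .. len(repo)-1
    #         node = repo[rev].node()
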
    tag_disallowed = ':\r\n'

    def _tag(self, names, node, message, local, user, date, extra={}):
        if isinstance(names, str):
            allchars = names
            names = (names,)
        else:
            allchars = ''.join(names)
        for c in self.tag_disallowed:
            if c in allchars:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if self._tagstypecache and name in self._tagstypecache:
                    old = self.tagscache.get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError:
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        if '.hgtags' not in self.dirstate:
            self.add(['.hgtags'])

        m = match_.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        for x in self.status()[:5]:
            if '.hgtags' in x:
                raise util.Abort(_('working copy of .hgtags is changed '
                                   '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)

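    # Usage sketch (illustrative; node is a binary changelog node):
    #
    #     node = repo.lookup('tip')
    #     repo.tag('v1.0', node, 'Added tag v1.0', False,
    #              'Committer <c@example.com>', None)   # commits .hgtags
    #     repo.tag(['wip', 'rc1'], node, '', True, None, None)  # localtags
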
    def tags(self):
        '''return a mapping of tag to node'''
        if self.tagscache:
            return self.tagscache

        globaltags = {}
        tagtypes = {}

        def readtags(lines, fn, tagtype):
            filetags = {}
            count = 0

            def warn(msg):
                self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))

            for l in lines:
                count += 1
                if not l:
                    continue
                s = l.split(" ", 1)
                if len(s) != 2:
                    warn(_("cannot parse entry"))
                    continue
                node, key = s
                key = encoding.tolocal(key.strip()) # stored in UTF-8
                try:
                    bin_n = bin(node)
                except TypeError:
                    warn(_("node '%s' is not well formed") % node)
                    continue
                if bin_n not in self.changelog.nodemap:
                    warn(_("tag '%s' refers to unknown node") % key)
                    continue

                h = []
                if key in filetags:
                    n, h = filetags[key]
                    h.append(n)
                filetags[key] = (bin_n, h)

            for k, nh in filetags.iteritems():
                if k not in globaltags:
                    globaltags[k] = nh
                    tagtypes[k] = tagtype
                    continue

                # we prefer the global tag if:
                #  it supersedes us OR
                #  mutual supersedes and it has a higher rank
                # otherwise we win because we're tip-most
                an, ah = nh
                bn, bh = globaltags[k]
                if (bn != an and an in bh and
                    (bn not in ah or len(bh) > len(ah))):
                    an = bn
                ah.extend([n for n in bh if n not in ah])
                globaltags[k] = an, ah
                tagtypes[k] = tagtype

-        def tagctxs():
-            seen = set()
-            f = None
-            ret = []
-            for node in self.heads():
-                try:
-                    fnode = self[node].filenode('.hgtags')
-                except error.LookupError:
-                    continue
-                if fnode not in seen:
-                    seen.add(fnode)
-                    if not f:
-                        f = self.filectx('.hgtags', fileid=fnode)
-                    else:
-                        f = f.filectx(fnode)
-                    ret.append(f)
-            return reversed(ret)
+        seen = set()
+        f = None
+        ctxs = []
+        for node in self.heads():
+            try:
+                fnode = self[node].filenode('.hgtags')
+            except error.LookupError:
+                continue
+            if fnode not in seen:
+                seen.add(fnode)
+                if not f:
+                    f = self.filectx('.hgtags', fileid=fnode)
+                else:
+                    f = f.filectx(fnode)
+                ctxs.append(f)

        # read the tags file from each head, ending with the tip
-        for f in tagctxs():
+        for f in reversed(ctxs):
            readtags(f.data().splitlines(), f, "global")

        try:
            data = encoding.fromlocal(self.opener("localtags").read())
            # localtags are stored in the local character set
            # while the internal tag table is stored in UTF-8
            readtags(data.splitlines(), "localtags", "local")
        except IOError:
            pass

        self.tagscache = {}
        self._tagstypecache = {}
        for k, nh in globaltags.iteritems():
            n = nh[0]
            if n != nullid:
                self.tagscache[k] = n
            self._tagstypecache[k] = tagtypes[k]
        self.tagscache['tip'] = self.changelog.tip()
        return self.tagscache

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        self.tags()

        return self._tagstypecache.get(tagname)

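    # Usage sketch (illustrative): the cached tag table and its types.
    #
    #     repo.tags()              # {'tip': node, 'v1.0': node, ...}
    #     repo.tagtype('v1.0')     # 'global' (from .hgtags)
    #     repo.tagtype('nosuch')   # None
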
    def tagslist(self):
        '''return a list of tags ordered by revision'''
        l = []
        for t, n in self.tags().iteritems():
            try:
                r = self.changelog.rev(n)
            except:
                r = -2 # sort to the beginning of the list if unknown
            l.append((r, t, n))
        return [(t, n) for r, t, n in sorted(l)]

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self.nodetagscache:
            self.nodetagscache = {}
            for t, n in self.tags().iteritems():
                self.nodetagscache.setdefault(n, []).append(t)
        return self.nodetagscache.get(node, [])

    def _branchtags(self, partial, lrev):
        # TODO: rename this function?
        tiprev = len(self) - 1
        if lrev != tiprev:
            self._updatebranchcache(partial, lrev+1, tiprev+1)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial

    def branchmap(self):
        tip = self.changelog.tip()
        if self.branchcache is not None and self._branchcachetip == tip:
            return self.branchcache

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if self.branchcache is None:
            self.branchcache = {} # avoid recursion in changectx
        else:
            self.branchcache.clear() # keep using the same dict
        if oldtip is None or oldtip not in self.changelog.nodemap:
            partial, last, lrev = self._readbranchcache()
        else:
            lrev = self.changelog.rev(oldtip)
            partial = self._ubranchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just tips)
        self._ubranchcache = partial

        # the branch cache is stored on disk as UTF-8, but in the local
        # charset internally
        for k, v in partial.iteritems():
            self.branchcache[encoding.tolocal(k)] = v
        return self.branchcache


    def branchtags(self):
        '''return a dict where branch names map to the tipmost head of
        the branch, open heads come before closed'''
        bt = {}
        for bn, heads in self.branchmap().iteritems():
            head = None
            for i in range(len(heads)-1, -1, -1):
                h = heads[i]
                if 'close' not in self.changelog.read(h)[5]:
                    head = h
                    break
            # no open heads were found
            if head is None:
                head = heads[-1]
            bt[bn] = head
        return bt


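    # Usage sketch (illustrative): branchmap() yields {branch: [heads]},
    # branchtags() narrows each list to one head, preferring open ones.
    #
    #     repo.branchmap().get('default', [])   # all heads of 'default'
    #     repo.branchtags()['default']          # its tip-most open head
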
    def _readbranchcache(self):
        partial = {}
        try:
            f = self.opener("branchheads.cache")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            for l in lines:
                if not l: continue
                node, label = l.split(" ", 1)
                partial.setdefault(label.strip(), []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev

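    # Format sketch (illustrative): .hg/branchheads.cache as parsed above
    # is a "<hex tip> <tiprev>" validity header followed by one
    # "<hex node> <branch label>" pair per line, e.g.:
    #
    #     9f3e... 42          (header: cache valid for tip rev 42)
    #     9f3e... default
    #     77ab... stable
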
    def _writebranchcache(self, branches, tip, tiprev):
        try:
            f = self.opener("branchheads.cache", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, nodes in branches.iteritems():
                for node in nodes:
                    f.write("%s %s\n" % (hex(node), label))
            f.rename()
        except (IOError, OSError):
            pass

    def _updatebranchcache(self, partial, start, end):
        for r in xrange(start, end):
            c = self[r]
            b = c.branch()
            bheads = partial.setdefault(b, [])
            bheads.append(c.node())
            for p in c.parents():
                pn = p.node()
                if pn in bheads:
                    bheads.remove(pn)

    def lookup(self, key):
        if isinstance(key, int):
            return self.changelog.node(key)
        elif key == '.':
            return self.dirstate.parents()[0]
        elif key == 'null':
            return nullid
        elif key == 'tip':
            return self.changelog.tip()
        n = self.changelog._match(key)
        if n:
            return n
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        n = self.changelog._partialmatch(key)
        if n:
            return n

        # can't find key, check if it might have come from damaged dirstate
        if key in self.dirstate.parents():
            raise error.Abort(_("working directory has unknown parent '%s'!")
                              % short(key))
        try:
            if len(key) == 20:
                key = hex(key)
        except:
            pass
        raise error.RepoError(_("unknown revision '%s'") % key)

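    # Usage sketch (illustrative): the key forms lookup() resolves, in
    # the order tried above.
    #
    #     repo.lookup(0)         # revision number
    #     repo.lookup('.')       # first dirstate parent
    #     repo.lookup('null')    # nullid
    #     repo.lookup('v1.0')    # exact match, tag, branch, node prefix
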
    def local(self):
        return True

    def join(self, f):
        return os.path.join(self.path, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def rjoin(self, f):
        return os.path.join(self.root, util.pconvert(f))

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
           fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return os.path.islink(self.wjoin(f))

    def _filter(self, filter, filename, data):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = match_.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l

        for mf, fn, cmd in self.filterpats[filter]:
            if mf(filename):
                self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

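    # Configuration sketch (illustrative): _filter() is driven by hgrc
    # sections mapping patterns to commands; a name registered via
    # adddatafilter() (e.g. win32text's cleverencode:) selects an
    # in-process filter, and '!' disables a pattern.
    #
    #     [encode]
    #     ** = cleverencode:
    #     [decode]
    #     ** = cleverdecode:
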
    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener(filename, 'r').read()
        return self._filter("encode", filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter("decode", filename, data)
        try:
            os.unlink(self.wjoin(filename))
        except OSError:
            pass
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener(filename, 'w').write(data)
            if 'x' in flags:
                util.set_flags(self.wjoin(filename), False, True)

    def wwritedata(self, filename, data):
        return self._filter("decode", filename, data)

    def transaction(self):
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(_("journal already exists - run hg recover"))

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)
        self.opener("journal.branch", "w").write(self.dirstate.branch())

        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate")),
                   (self.join("journal.branch"), self.join("undo.branch"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr

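    # Usage sketch (illustrative): callers pair transaction() with
    # close() and drop the reference, as commitctx() below does; losing
    # the last reference without close() aborts via the journal.
    #
    #     tr = repo.transaction()
    #     try:
    #         # ... write revlog data through tr ...
    #         tr.close()
    #     finally:
    #         del tr
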
    def recover(self):
        lock = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"), self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                self.ui.status(_("rolling back last transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("undo"), self.ui.warn)
                util.rename(self.join("undo.dirstate"), self.join("dirstate"))
                try:
                    branch = self.opener("undo.branch").read()
                    self.dirstate.setbranch(branch)
                except IOError:
                    self.ui.warn(_("Named branch could not be reset, "
                                   "current branch still is: %s\n")
                                 % encoding.tolocal(self.dirstate.branch()))
                self.invalidate()
                self.dirstate.invalidate()
            else:
                self.ui.warn(_("no rollback information available\n"))
        finally:
            release(lock, wlock)

    def invalidate(self):
        for a in "changelog manifest".split():
            if a in self.__dict__:
                delattr(self, a)
        self.tagscache = None
        self._tagstypecache = None
        self.nodetagscache = None
        self.branchcache = None
        self._ubranchcache = None
        self._branchcachetip = None

    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def lock(self, wait=True):
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
                       _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        l = self._lock(self.join("wlock"), wait, self.dirstate.write,
                       self.dirstate.invalidate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

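    # Locking note (sketch): wlock guards the working directory and
    # dirstate, lock guards the store; when both are needed they are
    # acquired wlock-then-lock and released together, as in rollback()
    # above and commit() below.
    #
    #     wlock = repo.wlock()
    #     lock = repo.lock()
    #     try:
    #         pass  # ... mutate store and working directory ...
    #     finally:
    #         release(lock, wlock)
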
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(_(" %s: searching for copy revision for %s\n") %
                              (fname, cfname))
                for ancestor in self['.'].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            self.ui.debug(_(" %s: copy %s:%s\n") % (fname, cfname, hex(crev)))
            meta["copy"] = cfname
            meta["copyrev"] = hex(crev)
            fparent1, fparent2 = nullid, newfparent
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = match_.always(self.root, '')

        if not force:
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            p1, p2 = self.dirstate.parents()
            wctx = self[None]

            if (not force and p2 != nullid and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            for s in wctx.substate:
                if match(s) and wctx.sub(s).dirty():
                    subs.append(s)
            if subs and '.hgsubstate' not in changes[0]:
                changes[0].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            if (not force and not extra.get("close") and p2 == nullid
                and not (changes[0] or changes[1] or changes[2])
                and self[None].branch() == self['.'].branch()):
                self.ui.status(_("nothing changed\n"))
                return None

            ms = merge_.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg resolve)"))

            cctx = context.workingctx(self, (p1, p2), text, user, date,
                                      extra, changes)
            if editor:
                cctx._text = editor(self, cctx)

            # commit subs
            if subs:
                state = wctx.substate.copy()
                for s in subs:
                    self.ui.status(_('committing subrepository %s\n') % s)
                    sr = wctx.sub(s).commit(cctx._text, user, date)
                    state[s] = (state[s][0], sr)
                subrepo.writestate(self, state)

            ret = self.commitctx(cctx, True)

            # update dirstate and mergestate
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.forget(f)
            self.dirstate.setparents(ret)
            ms.reset()

            return ret

        finally:
            wlock.release()

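    # Usage sketch (illustrative): committing everything, or a subset
    # via a matcher; the return value is the new node, or None when
    # nothing changed.
    #
    #     node = repo.commit(text='fix encoding', user='c@example.com')
    #     m = match_.exact(repo.root, '', ['a.txt'])
    #     node = repo.commit(text='just a.txt', match=m)
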
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.

        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = ctx.removed()
        p1, p2 = ctx.p1(), ctx.p2()
        m1 = p1.manifest().copy()
        m2 = p2.manifest()
        user = ctx.user()

        xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
        self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

        lock = self.lock()
        try:
            tr = self.transaction()
            trp = weakref.proxy(tr)

            # check in files
            new = {}
            changed = []
            linkrev = len(self)
            for f in sorted(ctx.modified() + ctx.added()):
                self.ui.note(f + "\n")
                try:
                    fctx = ctx[f]
                    new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                              changed)
                    m1.set(f, fctx.flags())
                except (OSError, IOError):
                    if error:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    else:
                        removed.append(f)

            # update manifest
            m1.update(new)
            removed = [f for f in sorted(removed) if f in m1 or f in m2]
            drop = [f for f in removed if f in m1]
            for f in drop:
                del m1[f]
            mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                   p2.manifestnode(), (new, drop))

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, changed + removed, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            tr.close()

            if self.branchcache:
                self.branchtags()

            self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
            return n
        finally:
            del tr
            lock.release()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

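    # Usage sketch (illustrative): walking matched files of the working
    # directory (node=None) or of a given changeset.
    #
    #     m = match_.match(repo.root, repo.getcwd(), ['glob:**.py'])
    #     for f in repo.walk(m):          # working directory
    #         pass
    #     for f in repo.walk(m, 'tip'):   # files present in tip
    #         pass
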
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or match_.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                if f not in ctx1:
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            s = self.dirstate.status(match, listignored, listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f].data())):
                        modified.append(f)
                    else:
                        fixup.append(f)

                if listclean:
                    clean += fixup

                # update dirstate for files that are actually clean
                if fixup:
                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    if (mf1.flags(fn) != mf2.flags(fn) or
                        (mf1[fn] != mf2[fn] and
                         (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)
            removed = mf1.keys()

        r = modified, added, removed, deleted, unknown, ignored, clean
        [l.sort() for l in r]
        return r

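    # A minimal usage sketch (illustrative only; 'repo' is an assumed
    # localrepository instance):
    #
    #   modified, added, removed, deleted, unknown, ignored, clean = \
    #       repo.status(unknown=True, clean=True)
    #   # each element of the 7-tuple is a sorted list of file names
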
    def add(self, list):
        wlock = self.wlock()
        try:
            rejected = []
            for f in list:
                p = self.wjoin(f)
                try:
                    st = os.lstat(p)
                except OSError:
                    self.ui.warn(_("%s does not exist!\n") % f)
                    rejected.append(f)
                    continue
                if st.st_size > 10000000:
                    self.ui.warn(_("%s: files over 10MB may cause memory and"
                                   " performance problems\n"
                                   "(use 'hg revert %s' to unadd the file)\n")
                                 % (f, f))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    self.ui.warn(_("%s not added: only files and symlinks "
                                   "supported currently\n") % f)
                    rejected.append(f)
                elif self.dirstate[f] in 'amn':
                    self.ui.warn(_("%s already tracked!\n") % f)
                elif self.dirstate[f] == 'r':
                    self.dirstate.normallookup(f)
                else:
                    self.dirstate.add(f)
            return rejected
        finally:
            wlock.release()

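    # A minimal usage sketch (illustrative only):
    #
    #   rejected = repo.add(['README', 'missing-file'])
    #   # names that could not be scheduled for addition are returned
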
    def forget(self, list):
        wlock = self.wlock()
        try:
            for f in list:
                if self.dirstate[f] != 'a':
                    self.ui.warn(_("%s not added!\n") % f)
                else:
                    self.dirstate.forget(f)
        finally:
            wlock.release()

    def remove(self, list, unlink=False):
        if unlink:
            for f in list:
                try:
                    util.unlink(self.wjoin(f))
                except OSError, inst:
                    if inst.errno != errno.ENOENT:
                        raise
        wlock = self.wlock()
        try:
            for f in list:
                if unlink and os.path.exists(self.wjoin(f)):
                    self.ui.warn(_("%s still exists!\n") % f)
                elif self.dirstate[f] == 'a':
                    self.dirstate.forget(f)
                elif f not in self.dirstate:
                    self.ui.warn(_("%s not tracked!\n") % f)
                else:
                    self.dirstate.remove(f)
        finally:
            wlock.release()

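    # Note: with unlink=True the files are deleted from disk before the
    # wlock is taken; a file that still exists by the time the dirstate
    # is updated is warned about and left tracked.
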
    def undelete(self, list):
        manifests = [self.manifest.read(self.changelog.read(p)[0])
                     for p in self.dirstate.parents() if p != nullid]
        wlock = self.wlock()
        try:
            for f in list:
                if self.dirstate[f] != 'r':
                    self.ui.warn(_("%s not removed!\n") % f)
                else:
                    m = f in manifests[0] and manifests[0] or manifests[1]
                    t = self.file(f).read(m[f])
                    self.wwrite(f, t, m.flags(f))
                    self.dirstate.normal(f)
        finally:
            wlock.release()

    def copy(self, source, dest):
        p = self.wjoin(dest)
        if not (os.path.exists(p) or os.path.islink(p)):
            self.ui.warn(_("%s does not exist!\n") % dest)
        elif not (os.path.isfile(p) or os.path.islink(p)):
            self.ui.warn(_("copy failed: %s is not a file or a "
                           "symbolic link\n") % dest)
        else:
            wlock = self.wlock()
            try:
                if self.dirstate[dest] in '?r':
                    self.dirstate.add(dest)
                self.dirstate.copy(source, dest)
            finally:
                wlock.release()

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        heads = [(-self.changelog.rev(h), h) for h in heads]
        return [n for (r, n) in sorted(heads)]

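    # The (-rev, node) pairs above are a decorate-sort-undecorate pass:
    # negating the revision number makes the ascending sort yield heads
    # from newest to oldest.
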
    def branchheads(self, branch=None, start=None, closed=False):
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        bheads = branches[branch]
        # the cache returns heads ordered lowest to highest
        bheads.reverse()
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            bheads = self.changelog.nodesbetween([start], bheads)[2]
        if not closed:
            bheads = [h for h in bheads if
                      ('close' not in self.changelog.read(h)[5])]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while 1:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

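    # Each tuple appended above is (head, root, parent1, parent2): the
    # loop follows first parents from 'head' until it reaches a merge or
    # the null revision, i.e. the root of that linear segment of history.
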
    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

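    # between() samples each top->bottom chain at exponentially growing
    # distances (1, 2, 4, 8, ... first-parent steps from 'top'), which is
    # what lets the discovery code below binary-search long branches with
    # only a handful of round trips.
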
    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore, base will be updated to include the nodes that exist
        in both self and remote but none of whose children exist in both.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote, see
        outgoing)
        """
        return self.findcommonincoming(remote, base, heads, force)[1]

    def findcommonincoming(self, remote, base=None, heads=None, force=False):
        """Return a tuple (common, missing roots, heads) used to identify
        missing nodes from remote.

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore, base will be updated to include the nodes that exist
        in both self and remote but none of whose children exist in both.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        """
        m = self.changelog.nodemap
        search = []
        fetch = set()
        seen = set()
        seenbranch = set()
        if base is None:
            base = {}

        if not heads:
            heads = remote.heads()

        if self.changelog.tip() == nullid:
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid], [nullid], list(heads)
            return [nullid], [], []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        heads = unknown
        if not unknown:
            return base.keys(), [], []

        req = set(unknown)
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n[0:2]) # schedule branch range for scanning
                    seenbranch.add(n)
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch.add(n[1]) # earliest unknown
                        for p in n[2:4]:
                            if p in m:
                                base[p] = 1 # latest known

                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req.add(p)
                seen.add(n[0])

            if r:
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                              (reqcnt, " ".join(map(short, r))))
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        while search:
            newsearch = []
            reqcnt += 1
            for n, l in zip(search, remote.between(search)):
                l.append(n[1])
                p = n[0]
                f = 1
                for i in l:
                    self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                    if i in m:
                        if f <= 2:
                            self.ui.debug(_("found new branch changeset %s\n") %
                                          short(p))
                            fetch.add(p)
                            base[i] = 1
                        else:
                            self.ui.debug(_("narrowed branch search to %s:%s\n")
                                          % (short(p), short(i)))
                            newsearch.append((p, i))
                        break
                    p, f = i, f * 2
            search = newsearch

        # sanity check our fetch list
        for f in fetch:
            if f in m:
                raise error.RepoError(_("already have changeset ")
                                      + short(f[:4]))

        if base.keys() == [nullid]:
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug(_("found new changesets starting at ") +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return base.keys(), list(fetch), heads

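    # Discovery runs in two phases: a breadth-first sweep over
    # remote.branches() classifies whole linear segments as known or
    # unknown, then the remote.between() bisection narrows each
    # partially-known segment down to its first missing changeset.
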
    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return list of nodes that are roots of subsets not in remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads, and return a second element which
        contains all remote heads which get new children.
        """
        if base is None:
            base = {}
            self.findincoming(remote, base, heads, force=force)

        self.ui.debug(_("common changesets up to ")
                      + " ".join(map(short, base.keys())) + "\n")

        remain = set(self.changelog.nodemap)

        # prune everything remote has from the tree
        remain.remove(nullid)
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                remain.remove(n)
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        # find every remote head that will get new children
        updated_heads = set()
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)
            if heads:
                if p1 in heads:
                    updated_heads.add(p1)
                if p2 in heads:
                    updated_heads.add(p2)

        # this is the set of all roots we have to push
        if heads:
            return subset, list(updated_heads)
        else:
            return subset

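    # The pruning pass above walks all ancestors of 'base' out of a copy
    # of the node map, so 'remain' ends up holding exactly the changesets
    # the remote lacks and 'subset' their roots.
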
    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            common, fetch, rheads = self.findcommonincoming(remote, heads=heads,
                                                            force=force)
            if fetch == [nullid]:
                self.ui.status(_("requesting all changes\n"))

            if not fetch:
                self.ui.status(_("no changes found\n"))
                return 0

            if heads is None and remote.capable('changegroupsubset'):
                heads = rheads

            if heads is None:
                cg = remote.changegroup(fetch, 'pull')
            else:
                if not remote.capable('changegroupsubset'):
                    raise util.Abort(_("Partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            return self.addchangegroup(cg, 'pull', remote.url())
        finally:
            lock.release()

    def push(self, remote, force=False, revs=None):
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        if remote.capable('unbundle'):
            return self.push_unbundle(remote, force, revs)
        return self.push_addchangegroup(remote, force, revs)

    def prepush(self, remote, force, revs):
        common = {}
        remote_heads = remote.heads()
        inc = self.findincoming(remote, common, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, common, remote_heads)
        if revs is not None:
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        def checkbranch(lheads, rheads, updatelh):
            '''
            check whether there are more local heads than remote heads on
            a specific branch.

            lheads: local branch heads
            rheads: remote branch heads
            updatelh: outgoing local branch heads
            '''

            warn = 0

            if not revs and len(lheads) > len(rheads):
                warn = 1
            else:
                updatelheads = [self.changelog.heads(x, lheads)
                                for x in updatelh]
                newheads = set(sum(updatelheads, [])) & set(lheads)

                if not newheads:
                    return True

                for r in rheads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            newheads.add(r)
                    else:
                        newheads.add(r)
                if len(newheads) > len(rheads):
                    warn = 1

            if warn:
                if not rheads: # new branch requires --force
                    self.ui.warn(_("abort: push creates new"
                                   " remote branch '%s'!\n") %
                                 self[updatelh[0]].branch())
                else:
                    self.ui.warn(_("abort: push creates new remote heads!\n"))

                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return False
            return True

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # Check for each named branch if we're creating new remote heads.
            # To be a remote head after push, node must be either:
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head
            #
            # New named branches cannot be created without --force.

            if remote_heads != [nullid]:
                if remote.capable('branchmap'):
                    localhds = {}
                    if not revs:
                        localhds = self.branchmap()
                    else:
                        for n in heads:
                            branch = self[n].branch()
                            if branch in localhds:
                                localhds[branch].append(n)
                            else:
                                localhds[branch] = [n]

                    remotehds = remote.branchmap()

                    for lh in localhds:
                        if lh in remotehds:
                            rheads = remotehds[lh]
                        else:
                            rheads = []
                        lheads = localhds[lh]
                        updatelh = [upd for upd in update
                                    if self[upd].branch() == lh]
                        if not updatelh:
                            continue
                        if not checkbranch(lheads, rheads, updatelh):
                            return None, 0
                else:
                    if not checkbranch(heads, remote_heads, update):
                        return None, 0

        if inc:
            self.ui.warn(_("note: unsynced remote changes!\n"))

        if revs is None:
            # use the fast path, no race possible on push
            cg = self._changegroup(common.keys(), 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads

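    # prepush returns (changegroup, remote_heads) when there is something
    # to send, or (None, exit status) when there is nothing to push or
    # the new-remote-heads check failed; both push paths below rely on
    # this contract.
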
    def push_addchangegroup(self, remote, force, revs):
        lock = remote.lock()
        try:
            ret = self.prepush(remote, force, revs)
            if ret[0] is not None:
                cg, remote_heads = ret
                return remote.addchangegroup(cg, 'push', self.url())
            return ret[1]
        finally:
            lock.release()

    def push_unbundle(self, remote, force, revs):
        # local repo finds heads on server, finds out what revs it
        # must push. once revs transferred, if server finds it has
        # different heads (someone else won commit/push race), server
        # aborts.

        ret = self.prepush(remote, force, revs)
        if ret[0] is not None:
            cg, remote_heads = ret
            if force:
                remote_heads = ['force']
            return remote.unbundle(cg, remote_heads, 'push')
        return ret[1]

    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug(_("list of changesets:\n"))
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source, extranodes=None):
        """This function generates a changegroup consisting of all the nodes
        that are descendants of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        The caller can specify some nodes that must be included in the
        changegroup using the extranodes argument.  It should be a dict
        where the keys are the filenames (or 1 for the manifest), and the
        values are lists of (node, linknode) tuples, where node is a wanted
        node and linknode is the changelog node that should be transmitted as
        the linkrev.
        """

        if extranodes is None:
            # can we go through the fast path ?
            heads.sort()
            allheads = self.heads()
            allheads.sort()
            if heads == allheads:
                common = []
                # parents of bases are known from both sides
                for n in bases:
                    for p in self.changelog.parents(n):
                        if p != nullid:
                            common.append(p)
                return self._changegroup(common, source)

        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        self.changegroupinfo(msng_cl_lst, source)
        # Some bases may turn out to be superfluous, and some heads may be
        # too. nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = set()
        # We assume that all parents of bases are known heads.
        for n in bases:
            knownheads.update(cl.parents(n))
        knownheads.discard(nullid)
        knownheads = list(knownheads)
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known. The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into a set.
            has_cl_set = set(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = set()

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function. Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history. Since the revision number both represents the
            # most efficient order to read the nodes in, and represents a
            # topological sorting of the nodes, this function is often useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev

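        # For example, sorting a node list with the comparator returned by
        # cmp_by_rev_func(cl) arranges it oldest-first in changelog order:
        #
        #   nodes.sort(cmp_by_rev_func(cl))
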
        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents. This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = list(hasset)
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset.add(n)
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup. The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and the total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = set()
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
                if linknode in has_cl_set:
                    has_mnfst_set.add(n)
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # for the inner function.
        def filenode_collector(changedfiles):
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to. It
            # does this by assuming that a filenode belongs to the changenode
            # that the first manifest referencing it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    deltamf = mnfst.readdelta(mnfstnode)
                    # For each line in the delta
                    for f, fnode in deltamf.iteritems():
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file, let's remove
        # all those we know the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = set()
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
                if clnode in has_cl_set:
                    hasset.add(n)
            prune_parents(filerevlog, hasset, msngset)

        # A function generator function that sets up a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Add the nodes that were explicitly requested.
        def add_extra_nodes(name, nodes):
            if not extranodes or name not in extranodes:
                return

            for node, linknode in extranodes[name]:
                if node not in nodes:
                    nodes[node] = linknode

        # Now that we have all these utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            add_extra_nodes(1, msng_mnfst_set)
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            if extranodes:
                for fname in extranodes:
                    if isinstance(fname, int):
                        continue
                    msng_filenode_set.setdefault(fname, {})
                    changedfiles[fname] = 1
            # Go through all our files in order sorted by name.
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if fname in msng_filenode_set:
                    prune_filenodes(fname, filerevlog)
                    add_extra_nodes(fname, msng_filenode_set[fname])
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1904 # Create a group generator and only pass in a changenode
1902 # Create a group generator and only pass in a changenode
1905 # lookup function as we need to collect no information
1903 # lookup function as we need to collect no information
1906 # from filenodes.
1904 # from filenodes.
1907 group = filerevlog.group(msng_filenode_lst,
1905 group = filerevlog.group(msng_filenode_lst,
1908 lookup_filenode_link_func(fname))
1906 lookup_filenode_link_func(fname))
1909 for chnk in group:
1907 for chnk in group:
1910 yield chnk
1908 yield chnk
1911 if fname in msng_filenode_set:
1909 if fname in msng_filenode_set:
1912 # Don't need this anymore, toss it to free memory.
1910 # Don't need this anymore, toss it to free memory.
1913 del msng_filenode_set[fname]
1911 del msng_filenode_set[fname]
1914 # Signal that no more groups are left.
1912 # Signal that no more groups are left.
1915 yield changegroup.closechunk()
1913 yield changegroup.closechunk()
1916
1914
1917 if msng_cl_lst:
1915 if msng_cl_lst:
1918 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1916 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1919
1917
1920 return util.chunkbuffer(gengroup())
1918 return util.chunkbuffer(gengroup())
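
        # util.chunkbuffer turns the generator above into a file-like object,
        # so callers can stream the changegroup with plain read() calls -- a
        # rough usage sketch (surrounding names hypothetical):
        #
        #     cg = repo.changegroupsubset(bases, heads, 'pull')
        #     while True:
        #         data = cg.read(4096)
        #         if not data:
        #             break
        #         send(data)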

    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    def _changegroup(self, common, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        common is the set of common nodes between remote and self"""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        nodes = cl.findmissing(common)
        revset = set([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes, source)

        def identity(x):
            return x

        def gennodelst(log):
            for r in log:
                if log.linkrev(r) in revset:
                    yield log.node(r)
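
        # linkrev(r) maps a manifest or filelog revision back to the
        # changelog revision that introduced it, so gennodelst yields exactly
        # the nodes of a revlog that belong to the outgoing changesets.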

        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                changedfileset.update(c[3])
            return collect_changed_files
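
        # cl.read() returns the parsed changelog entry as a tuple of
        # (manifest, user, (time, timezone), files, description, extra),
        # so c[3] is the list of files touched by that changeset.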

        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(revlog.rev(n)))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = set()

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())

    def addchangegroup(self, source, srctype, url, emptyok=False):
        """add changegroup to repo.

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug(_("add changeset %s\n") % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        # write changelog data to temp files so concurrent readers will not
        # see an inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = len(cl.heads())

        tr = self.transaction()
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, trp)

            # process the files
            self.ui.status(_("adding file changes\n"))
            while 1:
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = len(fl)
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1

            newheads = len(cl.heads())
            heads = ""
            if oldheads and newheads != oldheads:
                heads = _(" (%+d heads)") % (newheads - oldheads)

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, heads))

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
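                # cl.writepending() flushes the delayed changelog data to a
                # pending file and returns True if there was anything to
                # write; the hook machinery calls p() and uses a non-empty
                # result so pretxnchangegroup hooks can see the incoming
                # changesets before the transaction commits.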
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()
        finally:
            del tr

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug(_("updating the branch cache\n"))
            self.branchtags()
            self.hook("changegroup", node=hex(cl.node(clstart)),
                      source=srctype, url=url)

            for i in xrange(clstart, clend):
                self.hook("incoming", node=hex(cl.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1


    def stream_in(self, remote):
        fp = remote.stream_out()
        l = fp.readline()
        try:
            resp = int(l)
        except ValueError:
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        if resp == 1:
            raise util.Abort(_('operation forbidden by server'))
        elif resp == 2:
            raise util.Abort(_('locking the remote repository failed'))
        elif resp != 0:
            raise util.Abort(_('the server sent an unknown error code'))
        self.ui.status(_('streaming all changes\n'))
        l = fp.readline()
        try:
            total_files, total_bytes = map(int, l.split(' ', 1))
        except (ValueError, TypeError):
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        self.ui.status(_('%d files to transfer, %s of data\n') %
                       (total_files, util.bytecount(total_bytes)))
        start = time.time()
        for i in xrange(total_files):
            # XXX doesn't support '\n' or '\r' in filenames
            l = fp.readline()
            try:
                name, size = l.split('\0', 1)
                size = int(size)
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            self.ui.debug(_('adding %s (%s)\n') % (name, util.bytecount(size)))
            # for backwards compat, name was partially encoded
            ofp = self.sopener(store.decodedir(name), 'w')
            for chunk in util.filechunkiter(fp, limit=size):
                ofp.write(chunk)
            ofp.close()
        elapsed = time.time() - start
        if elapsed <= 0:
            elapsed = 0.001
        self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(total_bytes), elapsed,
                        util.bytecount(total_bytes / elapsed)))
        self.invalidate()
        return len(self.heads()) + 1
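
    # The stream_out format consumed above is line-oriented: a numeric
    # status line (0 = ok), then '<total_files> <total_bytes>', then for
    # each file a '<name>\0<size>' line followed by exactly <size> bytes
    # of raw store data.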
2146
2144
2147 def clone(self, remote, heads=[], stream=False):
2145 def clone(self, remote, heads=[], stream=False):
2148 '''clone remote repository.
2146 '''clone remote repository.
2149
2147
2150 keyword arguments:
2148 keyword arguments:
2151 heads: list of revs to clone (forces use of pull)
2149 heads: list of revs to clone (forces use of pull)
2152 stream: use streaming clone if possible'''
2150 stream: use streaming clone if possible'''
2153
2151
2154 # now, all clients that can request uncompressed clones can
2152 # now, all clients that can request uncompressed clones can
2155 # read repo formats supported by all servers that can serve
2153 # read repo formats supported by all servers that can serve
2156 # them.
2154 # them.
2157
2155
2158 # if revlog format changes, client will have to check version
2156 # if revlog format changes, client will have to check version
2159 # and format flags on "stream" capability, and use
2157 # and format flags on "stream" capability, and use
2160 # uncompressed only if compatible.
2158 # uncompressed only if compatible.
2161
2159
2162 if stream and not heads and remote.capable('stream'):
2160 if stream and not heads and remote.capable('stream'):
2163 return self.stream_in(remote)
2161 return self.stream_in(remote)
2164 return self.pull(remote, heads)
2162 return self.pull(remote, heads)

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a
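
# aftertrans copies the rename list into plain tuples and the returned
# callback closes over only that list, so it holds no reference back to the
# repository or transaction objects -- a rough usage sketch (argument names
# are assumptions):
#
#     tr = transaction.transaction(ui.warn, opener, journalpath,
#                                  aftertrans(renames))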

def instance(ui, path, create):
    return localrepository(ui, util.drop_scheme('file', path), create)

def islocal(path):
    return True