commitctx: use contexts more fully
Matt Mackall
r8414:2348ce25 default
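
This changeset reworks localrepo.commitctx() to pull parent data from
changectx objects instead of re-reading raw revlog entries. In outline
(a summary of the hunks below, not wording from the commit itself):

    # before: resolve parent nodes, then re-read changelog and manifest
    p1, p2 = [p.node() for p in ctx.parents()]
    c1 = self.changelog.read(p1)
    c2 = self.changelog.read(p2)
    m1 = self.manifest.read(c1[0]).copy()
    m2 = self.manifest.read(c2[0])

    # after: ask the parent contexts directly
    p1, p2 = ctx.p1(), ctx.p2()
    m1 = p1.manifest().copy()
    m2 = p2.manifest()

Call sites that still need raw nodes adjust to match: xp1/xp2 come from
p1.hex() and p2.hex() (empty for a null p2), manifest.add() takes
p1.manifestnode() and p2.manifestnode(), and changelog.add() takes
p1.node() and p2.node().
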
@@ -1,2099 +1,2095 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2, incorporated herein by reference.

from node import bin, hex, nullid, nullrev, short
from i18n import _
import repo, changegroup
import changelog, dirstate, filelog, manifest, context
import lock, transaction, store, encoding
import util, extensions, hook, error
import match as match_
import merge as merge_
from lock import release
import weakref, stat, errno, os, time, inspect
propertycache = util.propertycache

class localrepository(repo.repository):
    capabilities = set(('lookup', 'changegroupsubset'))
    supported = set('revlogv1 store fncache'.split())

    def __init__(self, baseui, path=None, create=0):
        repo.repository.__init__(self)
        self.root = os.path.realpath(path)
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.opener = util.opener(self.path)
        self.wopener = util.opener(self.root)

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    os.mkdir(path)
                os.mkdir(self.path)
                requirements = ["revlogv1"]
                if baseui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                    if baseui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                    # create an invalid changelog
                    self.opener("00changelog.i", "a").write(
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                reqfile = self.opener("requires", "w")
                for r in requirements:
                    reqfile.write("%s\n" % r)
                reqfile.close()
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            # find requirements
            requirements = set()
            try:
                requirements = set(self.opener("requires").read().splitlines())
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
            for r in requirements - self.supported:
                raise error.RepoError(_("requirement '%s' not supported") % r)

        self.store = store.store(requirements, self.path, util.opener)
        self.spath = self.store.path
        self.sopener = self.store.opener
        self.sjoin = self.store.join
        self.opener.createmode = self.store.createmode

        self.baseui = baseui
        self.ui = baseui.copy()
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        self.tagscache = None
        self._tagstypecache = None
        self.branchcache = None
        self._ubranchcache = None # UTF-8 version of branchcache
        self._branchcachetip = None
        self.nodetagscache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

    @propertycache
    def changelog(self):
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        self.sopener.defversion = c.version
        return c

    @propertycache
    def manifest(self):
        return manifest.manifest(self.sopener)

    @propertycache
    def dirstate(self):
        return dirstate.dirstate(self.opener, self.ui, self.root)

    def __getitem__(self, changeid):
        if changeid == None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        for i in xrange(len(self)):
            yield i

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        return hook.hook(self.ui, self, name, throw, **args)

    tag_disallowed = ':\r\n'

    def _tag(self, names, node, message, local, user, date, extra={}):
        if isinstance(names, str):
            allchars = names
            names = (names,)
        else:
            allchars = ''.join(names)
        for c in self.tag_disallowed:
            if c in allchars:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if self._tagstypecache and name in self._tagstypecache:
                    old = self.tagscache.get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError:
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        if '.hgtags' not in self.dirstate:
            self.add(['.hgtags'])

        tagnode = self.commit(['.hgtags'], message, user, date, extra=extra)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        for x in self.status()[:5]:
            if '.hgtags' in x:
                raise util.Abort(_('working copy of .hgtags is changed '
                                   '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)

    def tags(self):
        '''return a mapping of tag to node'''
        if self.tagscache:
            return self.tagscache

        globaltags = {}
        tagtypes = {}

        def readtags(lines, fn, tagtype):
            filetags = {}
            count = 0

            def warn(msg):
                self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))

            for l in lines:
                count += 1
                if not l:
                    continue
                s = l.split(" ", 1)
                if len(s) != 2:
                    warn(_("cannot parse entry"))
                    continue
                node, key = s
                key = encoding.tolocal(key.strip()) # stored in UTF-8
                try:
                    bin_n = bin(node)
                except TypeError:
                    warn(_("node '%s' is not well formed") % node)
                    continue
                if bin_n not in self.changelog.nodemap:
                    warn(_("tag '%s' refers to unknown node") % key)
                    continue

                h = []
                if key in filetags:
                    n, h = filetags[key]
                    h.append(n)
                filetags[key] = (bin_n, h)

            for k, nh in filetags.iteritems():
                if k not in globaltags:
                    globaltags[k] = nh
                    tagtypes[k] = tagtype
                    continue

                # we prefer the global tag if:
                #  it supercedes us OR
                #  mutual supercedes and it has a higher rank
                # otherwise we win because we're tip-most
                an, ah = nh
                bn, bh = globaltags[k]
                if (bn != an and an in bh and
                    (bn not in ah or len(bh) > len(ah))):
                    an = bn
                ah.extend([n for n in bh if n not in ah])
                globaltags[k] = an, ah
                tagtypes[k] = tagtype

        # read the tags file from each head, ending with the tip
        f = None
        for rev, node, fnode in self._hgtagsnodes():
            f = (f and f.filectx(fnode) or
                 self.filectx('.hgtags', fileid=fnode))
            readtags(f.data().splitlines(), f, "global")

        try:
            data = encoding.fromlocal(self.opener("localtags").read())
            # localtags are stored in the local character set
            # while the internal tag table is stored in UTF-8
            readtags(data.splitlines(), "localtags", "local")
        except IOError:
            pass

        self.tagscache = {}
        self._tagstypecache = {}
        for k, nh in globaltags.iteritems():
            n = nh[0]
            if n != nullid:
                self.tagscache[k] = n
            self._tagstypecache[k] = tagtypes[k]
        self.tagscache['tip'] = self.changelog.tip()
        return self.tagscache

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        self.tags()

        return self._tagstypecache.get(tagname)

    def _hgtagsnodes(self):
        last = {}
        ret = []
        for node in reversed(self.heads()):
            c = self[node]
            rev = c.rev()
            try:
                fnode = c.filenode('.hgtags')
            except error.LookupError:
                continue
            ret.append((rev, node, fnode))
            if fnode in last:
                ret[last[fnode]] = None
            last[fnode] = len(ret) - 1
        return [item for item in ret if item]

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        l = []
        for t, n in self.tags().iteritems():
            try:
                r = self.changelog.rev(n)
            except:
                r = -2 # sort to the beginning of the list if unknown
            l.append((r, t, n))
        return [(t, n) for r, t, n in sorted(l)]

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self.nodetagscache:
            self.nodetagscache = {}
            for t, n in self.tags().iteritems():
                self.nodetagscache.setdefault(n, []).append(t)
        return self.nodetagscache.get(node, [])

    def _branchtags(self, partial, lrev):
        # TODO: rename this function?
        tiprev = len(self) - 1
        if lrev != tiprev:
            self._updatebranchcache(partial, lrev+1, tiprev+1)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial

    def _branchheads(self):
        tip = self.changelog.tip()
        if self.branchcache is not None and self._branchcachetip == tip:
            return self.branchcache

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if self.branchcache is None:
            self.branchcache = {} # avoid recursion in changectx
        else:
            self.branchcache.clear() # keep using the same dict
        if oldtip is None or oldtip not in self.changelog.nodemap:
            partial, last, lrev = self._readbranchcache()
        else:
            lrev = self.changelog.rev(oldtip)
            partial = self._ubranchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just tips)
        self._ubranchcache = partial

        # the branch cache is stored on disk as UTF-8, but in the local
        # charset internally
        for k, v in partial.iteritems():
            self.branchcache[encoding.tolocal(k)] = v
        return self.branchcache


    def branchtags(self):
        '''return a dict where branch names map to the tipmost head of
        the branch, open heads come before closed'''
        bt = {}
        for bn, heads in self._branchheads().iteritems():
            head = None
            for i in range(len(heads)-1, -1, -1):
                h = heads[i]
                if 'close' not in self.changelog.read(h)[5]:
                    head = h
                    break
            # no open heads were found
            if head is None:
                head = heads[-1]
            bt[bn] = head
        return bt


    def _readbranchcache(self):
        partial = {}
        try:
            f = self.opener("branchheads.cache")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            for l in lines:
                if not l: continue
                node, label = l.split(" ", 1)
                partial.setdefault(label.strip(), []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev

    def _writebranchcache(self, branches, tip, tiprev):
        try:
            f = self.opener("branchheads.cache", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, nodes in branches.iteritems():
                for node in nodes:
                    f.write("%s %s\n" % (hex(node), label))
            f.rename()
        except (IOError, OSError):
            pass

    def _updatebranchcache(self, partial, start, end):
        for r in xrange(start, end):
            c = self[r]
            b = c.branch()
            bheads = partial.setdefault(b, [])
            bheads.append(c.node())
            for p in c.parents():
                pn = p.node()
                if pn in bheads:
                    bheads.remove(pn)

    def lookup(self, key):
        if isinstance(key, int):
            return self.changelog.node(key)
        elif key == '.':
            return self.dirstate.parents()[0]
        elif key == 'null':
            return nullid
        elif key == 'tip':
            return self.changelog.tip()
        n = self.changelog._match(key)
        if n:
            return n
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        n = self.changelog._partialmatch(key)
        if n:
            return n
        try:
            if len(key) == 20:
                key = hex(key)
        except:
            pass
        raise error.RepoError(_("unknown revision '%s'") % key)

    def local(self):
        return True

    def join(self, f):
        return os.path.join(self.path, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def rjoin(self, f):
        return os.path.join(self.root, util.pconvert(f))

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
           fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return os.path.islink(self.wjoin(f))

    def _filter(self, filter, filename, data):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = util.matcher(self.root, "", [pat], [], [])[1]
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l

        for mf, fn, cmd in self.filterpats[filter]:
            if mf(filename):
                self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener(filename, 'r').read()
        return self._filter("encode", filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter("decode", filename, data)
        try:
            os.unlink(self.wjoin(filename))
        except OSError:
            pass
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener(filename, 'w').write(data)
            if 'x' in flags:
                util.set_flags(self.wjoin(filename), False, True)

    def wwritedata(self, filename, data):
        return self._filter("decode", filename, data)

    def transaction(self):
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(_("journal already exists - run hg recover"))

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)
        self.opener("journal.branch", "w").write(self.dirstate.branch())

        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate")),
                   (self.join("journal.branch"), self.join("undo.branch"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr

    def recover(self):
        lock = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"), self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                self.ui.status(_("rolling back last transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("undo"), self.ui.warn)
                util.rename(self.join("undo.dirstate"), self.join("dirstate"))
                try:
                    branch = self.opener("undo.branch").read()
                    self.dirstate.setbranch(branch)
                except IOError:
                    self.ui.warn(_("Named branch could not be reset, "
                                   "current branch still is: %s\n")
                                 % encoding.tolocal(self.dirstate.branch()))
                self.invalidate()
                self.dirstate.invalidate()
            else:
                self.ui.warn(_("no rollback information available\n"))
        finally:
            release(lock, wlock)

    def invalidate(self):
        for a in "changelog manifest".split():
            if a in self.__dict__:
                delattr(self, a)
        self.tagscache = None
        self._tagstypecache = None
        self.nodetagscache = None
        self.branchcache = None
        self._ubranchcache = None
        self._branchcachetip = None

    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def lock(self, wait=True):
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
                       _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        l = self._lock(self.join("wlock"), wait, self.dirstate.write,
                       self.dirstate.invalidate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(_(" %s: searching for copy revision for %s\n") %
                              (fname, cfname))
                for ancestor in self['.'].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            self.ui.debug(_(" %s: copy %s:%s\n") % (fname, cfname, hex(crev)))
            meta["copy"] = cfname
            meta["copyrev"] = hex(crev)
            fparent1, fparent2 = nullid, newfparent
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

    def commit(self, files=None, text="", user=None, date=None, match=None,
               force=False, editor=False, extra={}):
        wlock = lock = None
        if extra.get("close"):
            force = True
        if files:
            files = list(set(files))

        wlock = self.wlock()
        try:
            p1, p2 = self.dirstate.parents()

            if (not force and p2 != nullid and
                (match and (match.files() or match.anypats()))):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            if files:
                modified, removed = [], []
                for f in files:
                    s = self.dirstate[f]
                    if s in 'nma':
                        modified.append(f)
                    elif s == 'r':
                        removed.append(f)
                    else:
                        self.ui.warn(_("%s not tracked!\n") % f)
                changes = [modified, [], removed, [], []]
            else:
                changes = self.status(match=match)

            if (not (changes[0] or changes[1] or changes[2])
                and not force and p2 == nullid and
                self[None].branch() == self['.'].branch()):
                self.ui.status(_("nothing changed\n"))
                return None

            ms = merge_.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg resolve)"))
            wctx = context.workingctx(self, (p1, p2), text, user, date,
                                      extra, changes)
            r = self.commitctx(wctx, editor, True)
            ms.reset()
            return r

        finally:
            wlock.release()

    def commitctx(self, ctx, editor=None, working=False):
        """Add a new revision to current repository.

        Revision information is passed via the context argument.
        If editor is supplied, it is called to get a commit message.
        If working is set, the working directory is affected.
        """

        tr = lock = None
        valid = 0 # don't save the dirstate if this isn't set
        remove = ctx.removed()
-
-        p1, p2 = [p.node() for p in ctx.parents()]
-        c1 = self.changelog.read(p1)
-        c2 = self.changelog.read(p2)
-        m1 = self.manifest.read(c1[0]).copy()
-        m2 = self.manifest.read(c2[0])
+        p1, p2 = ctx.p1(), ctx.p2()
+        m1 = p1.manifest().copy()
+        m2 = p2.manifest()
        user = ctx.user()

-        xp1, xp2 = hex(p1), hex(p2)
-        if p2 == nullid:
-            xp2 = ''
+        xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
        self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

        lock = self.lock()
        try:
            tr = self.transaction()
            trp = weakref.proxy(tr)

            # check in files
            new = {}
            changed = []
            linkrev = len(self)
            for f in sorted(ctx.modified() + ctx.added()):
                self.ui.note(f + "\n")
                try:
                    fctx = ctx[f]
                    new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                              changed)
                    m1.set(f, fctx.flags())
                    if working:
                        self.dirstate.normal(f)

                except (OSError, IOError):
                    if working:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    else:
                        remove.append(f)

            updated, added = [], []
            for f in sorted(changed):
                if f in m1 or f in m2:
                    updated.append(f)
                else:
                    added.append(f)

            # update manifest
            m1.update(new)
            removed = [f for f in sorted(remove) if f in m1 or f in m2]
            removed1 = []

            for f in removed:
                if f in m1:
                    del m1[f]
                    removed1.append(f)
-            mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
-                                   (new, removed1))
+            mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
+                                   p2.manifestnode(), (new, removed1))

            text = ctx.description()
            if editor:
                text = editor(self, ctx, added, updated, removed)

            lines = [line.rstrip() for line in text.rstrip().splitlines()]
            while lines and not lines[0]:
                del lines[0]
            text = '\n'.join(lines)

            self.changelog.delayupdate()
-            n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
+            n = self.changelog.add(mn, changed + removed, text, trp,
+                                   p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
902 p = lambda: self.changelog.writepending() and self.root or ""
898 p = lambda: self.changelog.writepending() and self.root or ""
903 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
899 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
904 parent2=xp2, pending=p)
900 parent2=xp2, pending=p)
905 self.changelog.finalize(trp)
901 self.changelog.finalize(trp)
906 tr.close()
902 tr.close()
907
903
908 if self.branchcache:
904 if self.branchcache:
909 self.branchtags()
905 self.branchtags()
910
906
911 if working:
907 if working:
912 self.dirstate.setparents(n)
908 self.dirstate.setparents(n)
913 for f in removed:
909 for f in removed:
914 self.dirstate.forget(f)
910 self.dirstate.forget(f)
915 valid = 1 # our dirstate updates are complete
911 valid = 1 # our dirstate updates are complete
916
912
917 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
913 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
918 return n
914 return n
919 finally:
915 finally:
920 if not valid: # don't save our updated dirstate
916 if not valid: # don't save our updated dirstate
921 self.dirstate.invalidate()
917 self.dirstate.invalidate()
922 del tr
918 del tr
923 lock.release()
919 lock.release()
924
920
925 def walk(self, match, node=None):
921 def walk(self, match, node=None):
926 '''
922 '''
927 walk recursively through the directory tree or a given
923 walk recursively through the directory tree or a given
928 changeset, finding all files matched by the match
924 changeset, finding all files matched by the match
929 function
925 function
930 '''
926 '''
931 return self[node].walk(match)
927 return self[node].walk(match)
932
928
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or match_.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                if f not in ctx1:
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
                return False
            match.bad = bad

        if working: # we need to scan the working dir
            s = self.dirstate.status(match, listignored, listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f].data())):
                        modified.append(f)
                    else:
                        fixup.append(f)

                if listclean:
                    clean += fixup

                # update dirstate for files that are actually clean
                if fixup:
                    wlock = None
                    try:
                        try:
                            # updating the dirstate is optional
                            # so we don't wait on the lock
                            wlock = self.wlock(False)
                            for f in fixup:
                                self.dirstate.normal(f)
                        except error.LockError:
                            pass
                    finally:
                        release(wlock)

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    if (mf1.flags(fn) != mf2.flags(fn) or
                        (mf1[fn] != mf2[fn] and
                         (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)
            removed = mf1.keys()

        r = modified, added, removed, deleted, unknown, ignored, clean
        [l.sort() for l in r]
        return r

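    # Editor's note, an illustrative sketch (not part of the original
    # module): callers unpack the seven sorted lists that status()
    # returns; `repo` is an assumed localrepository instance.
    #
    #   >>> st = repo.status(clean=True)
    #   >>> modified, added, removed, deleted, unknown, ignored, clean = st
    #
    # unknown and ignored stay empty unless the matching flags are set,
    # since listing them costs a working-directory scan.
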
    def add(self, list):
        wlock = self.wlock()
        try:
            rejected = []
            for f in list:
                p = self.wjoin(f)
                try:
                    st = os.lstat(p)
                except OSError:
                    self.ui.warn(_("%s does not exist!\n") % f)
                    rejected.append(f)
                    continue
                if st.st_size > 10000000:
                    self.ui.warn(_("%s: files over 10MB may cause memory and"
                                   " performance problems\n"
                                   "(use 'hg revert %s' to unadd the file)\n")
                                 % (f, f))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    self.ui.warn(_("%s not added: only files and symlinks "
                                   "supported currently\n") % f)
                    # report the repo-relative name, as the other
                    # rejection paths do
                    rejected.append(f)
                elif self.dirstate[f] in 'amn':
                    self.ui.warn(_("%s already tracked!\n") % f)
                elif self.dirstate[f] == 'r':
                    self.dirstate.normallookup(f)
                else:
                    self.dirstate.add(f)
            return rejected
        finally:
            wlock.release()

    def forget(self, list):
        wlock = self.wlock()
        try:
            for f in list:
                if self.dirstate[f] != 'a':
                    self.ui.warn(_("%s not added!\n") % f)
                else:
                    self.dirstate.forget(f)
        finally:
            wlock.release()

    def remove(self, list, unlink=False):
        wlock = None
        try:
            if unlink:
                for f in list:
                    try:
                        util.unlink(self.wjoin(f))
                    except OSError, inst:
                        if inst.errno != errno.ENOENT:
                            raise
            wlock = self.wlock()
            for f in list:
                if unlink and os.path.exists(self.wjoin(f)):
                    self.ui.warn(_("%s still exists!\n") % f)
                elif self.dirstate[f] == 'a':
                    self.dirstate.forget(f)
                elif f not in self.dirstate:
                    self.ui.warn(_("%s not tracked!\n") % f)
                else:
                    self.dirstate.remove(f)
        finally:
            release(wlock)

    def undelete(self, list):
        manifests = [self.manifest.read(self.changelog.read(p)[0])
                     for p in self.dirstate.parents() if p != nullid]
        wlock = self.wlock()
        try:
            for f in list:
                if self.dirstate[f] != 'r':
                    self.ui.warn(_("%s not removed!\n") % f)
                else:
                    m = f in manifests[0] and manifests[0] or manifests[1]
                    t = self.file(f).read(m[f])
                    self.wwrite(f, t, m.flags(f))
                    self.dirstate.normal(f)
        finally:
            wlock.release()

    def copy(self, source, dest):
        p = self.wjoin(dest)
        if not (os.path.exists(p) or os.path.islink(p)):
            self.ui.warn(_("%s does not exist!\n") % dest)
        elif not (os.path.isfile(p) or os.path.islink(p)):
            self.ui.warn(_("copy failed: %s is not a file or a "
                           "symbolic link\n") % dest)
        else:
            wlock = self.wlock()
            try:
                if self.dirstate[dest] in '?r':
                    self.dirstate.add(dest)
                self.dirstate.copy(source, dest)
            finally:
                wlock.release()

    def heads(self, start=None, closed=True):
        heads = self.changelog.heads(start)
        def display(head):
            if closed:
                return True
            extras = self.changelog.read(head)[5]
            return ('close' not in extras)
        # sort the output in rev descending order
        heads = [(-self.changelog.rev(h), h) for h in heads if display(h)]
        return [n for (r, n) in sorted(heads)]

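    # Editor's note, an illustrative sketch (not part of the original
    # module): heads() yields nodes in descending revision order, and
    # closed=False filters out heads whose changeset extra dict carries
    # a 'close' marker:
    #
    #   >>> repo.heads(closed=False)   # open heads only, newest first
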
    def branchheads(self, branch=None, start=None, closed=True):
        if branch is None:
            branch = self[None].branch()
        branches = self._branchheads()
        if branch not in branches:
            return []
        bheads = branches[branch]
        # the cache returns heads ordered lowest to highest
        bheads.reverse()
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            bheads = self.changelog.nodesbetween([start], bheads)[2]
        if not closed:
            bheads = [h for h in bheads if
                      ('close' not in self.changelog.read(h)[5])]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while 1:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

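    # Editor's note, an illustrative sketch (not part of the original
    # module): each tuple describes the linear segment walked back from a
    # requested node.  For an unbranched history 0 -- 1 -- 2:
    #
    #   >>> repo.branches([repo.changelog.node(2)])
    #   # -> [(node(2), node(0), nullid, nullid)]
    #
    # i.e. the requested head, the root of its linear run, and that
    # root's two parents.
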
    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

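    # Editor's note, an illustrative sketch (not part of the original
    # module): for each (top, bottom) pair the loop above samples
    # ancestors of top at exponentially growing distances 1, 2, 4, 8, ...
    # which is what lets findcommonincoming narrow an unknown range in
    # O(log n) round trips.  With a linear history, top at rev 100 and
    # bottom at rev 0:
    #
    #   >>> repo.between([(repo.changelog.node(100), repo.changelog.node(0))])
    #   # -> [[node(99), node(98), node(96), node(92), node(84), node(68),
    #   #      node(36)]]
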
    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore base will be updated to include the nodes that exist
        in both self and remote but have no children that exist in both.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote,
        see outgoing)
        """
        return self.findcommonincoming(remote, base, heads, force)[1]

    def findcommonincoming(self, remote, base=None, heads=None, force=False):
        """Return a tuple (common, missing roots, heads) used to identify
        missing nodes from remote.

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore base will be updated to include the nodes that exist
        in both self and remote but have no children that exist in both.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        """
        m = self.changelog.nodemap
        search = []
        fetch = set()
        seen = set()
        seenbranch = set()
        if base is None:
            base = {}

        if not heads:
            heads = remote.heads()

        if self.changelog.tip() == nullid:
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid], [nullid], list(heads)
            return [nullid], [], []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        heads = unknown
        if not unknown:
            return base.keys(), [], []

        req = set(unknown)
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n[0:2]) # schedule branch range for scanning
                    seenbranch.add(n)
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch.add(n[1]) # earliest unknown
                            for p in n[2:4]:
                                if p in m:
                                    base[p] = 1 # latest known

                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req.add(p)
                seen.add(n[0])

            if r:
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                              (reqcnt, " ".join(map(short, r))))
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        while search:
            newsearch = []
            reqcnt += 1
            for n, l in zip(search, remote.between(search)):
                l.append(n[1])
                p = n[0]
                f = 1
                for i in l:
                    self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                    if i in m:
                        if f <= 2:
                            self.ui.debug(_("found new branch changeset %s\n") %
                                          short(p))
                            fetch.add(p)
                            base[i] = 1
                        else:
                            self.ui.debug(_("narrowed branch search to %s:%s\n")
                                          % (short(p), short(i)))
                            newsearch.append((p, i))
                        break
                    p, f = i, f * 2
            search = newsearch

        # sanity check our fetch list
        for f in fetch:
            if f in m:
                raise error.RepoError(_("already have changeset ")
                                      + short(f[:4]))

        if base.keys() == [nullid]:
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug(_("found new changesets starting at ") +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return base.keys(), list(fetch), heads

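    # Editor's note, an illustrative sketch (not part of the original
    # module) of the return value; `remote` is an assumed repository
    # proxy exposing heads()/branches()/between() as used above:
    #
    #   >>> common, fetch, rheads = repo.findcommonincoming(remote)
    #   >>> # common: nodes known to both sides (parents of the missing set)
    #   >>> # fetch:  roots of the set of changesets we are missing
    #   >>> # rheads: the remote heads that were unknown locally
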
    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return list of nodes that are roots of subsets not in remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads, and return a second element which
        contains all remote heads which get new children.
        """
        if base is None:
            base = {}
            self.findincoming(remote, base, heads, force=force)

        self.ui.debug(_("common changesets up to ")
                      + " ".join(map(short, base.keys())) + "\n")

        remain = set(self.changelog.nodemap)

        # prune everything remote has from the tree
        remain.remove(nullid)
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                remain.remove(n)
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        # find every remote head that will get new children
        updated_heads = {}
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)
            if heads:
                if p1 in heads:
                    updated_heads[p1] = True
                if p2 in heads:
                    updated_heads[p2] = True

        # this is the set of all roots we have to push
        if heads:
            return subset, updated_heads.keys()
        else:
            return subset

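    # Editor's note, an illustrative sketch (not part of the original
    # module): the pruning above removes everything the remote is known
    # to have; any surviving node whose parents were both pruned is the
    # root of an outgoing subset.  When heads are supplied, the second
    # return value lists the remote heads that would gain children:
    #
    #   >>> roots, updated = repo.findoutgoing(remote, heads=remote.heads())
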
    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            common, fetch, rheads = self.findcommonincoming(remote, heads=heads,
                                                            force=force)
            if fetch == [nullid]:
                self.ui.status(_("requesting all changes\n"))

            if not fetch:
                self.ui.status(_("no changes found\n"))
                return 0

            if heads is None and remote.capable('changegroupsubset'):
                heads = rheads

            if heads is None:
                cg = remote.changegroup(fetch, 'pull')
            else:
                if not remote.capable('changegroupsubset'):
                    raise util.Abort(_("Partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            return self.addchangegroup(cg, 'pull', remote.url())
        finally:
            lock.release()

    def push(self, remote, force=False, revs=None):
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        if remote.capable('unbundle'):
            return self.push_unbundle(remote, force, revs)
        return self.push_addchangegroup(remote, force, revs)

    def prepush(self, remote, force, revs):
        common = {}
        remote_heads = remote.heads()
        inc = self.findincoming(remote, common, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, common, remote_heads)
        if revs is not None:
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # check if we're creating new remote heads
            # to be a remote head after push, node must be either
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head

            warn = 0

            if remote_heads == [nullid]:
                warn = 0
            elif not revs and len(heads) > len(remote_heads):
                warn = 1
            else:
                newheads = list(heads)
                for r in remote_heads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            newheads.append(r)
                    else:
                        newheads.append(r)
                if len(newheads) > len(remote_heads):
                    warn = 1

            if warn:
                self.ui.warn(_("abort: push creates new remote heads!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 0
            elif inc:
                self.ui.warn(_("note: unsynced remote changes!\n"))

        if revs is None:
            # use the fast path, no race possible on push
            cg = self._changegroup(common.keys(), 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads

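    # Editor's note, an illustrative example (not part of the original
    # module) of the new-head check above: pushing everything (revs is
    # None) from a repo with two local heads to a remote holding one
    # related head gives len(heads) == 2 > len(remote_heads) == 1, so
    # prepush() warns and returns (None, 0); the same push with
    # force=True skips the check and returns the changegroup instead.
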
    def push_addchangegroup(self, remote, force, revs):
        lock = remote.lock()
        try:
            ret = self.prepush(remote, force, revs)
            if ret[0] is not None:
                cg, remote_heads = ret
                return remote.addchangegroup(cg, 'push', self.url())
            return ret[1]
        finally:
            lock.release()

    def push_unbundle(self, remote, force, revs):
        # local repo finds heads on server, finds out what revs it
        # must push. once revs transferred, if server finds it has
        # different heads (someone else won commit/push race), server
        # aborts.

        ret = self.prepush(remote, force, revs)
        if ret[0] is not None:
            cg, remote_heads = ret
            if force:
                remote_heads = ['force']
            return remote.unbundle(cg, remote_heads, 'push')
        return ret[1]

    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug(_("list of changesets:\n"))
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source, extranodes=None):
        """This function generates a changegroup consisting of all the nodes
        that are descendants of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        The caller can specify some nodes that must be included in the
        changegroup using the extranodes argument. It should be a dict
        where the keys are the filenames (or 1 for the manifest), and the
        values are lists of (node, linknode) tuples, where node is a wanted
        node and linknode is the changelog node that should be transmitted as
        the linkrev.
        """
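
        # Editor's note, an illustrative sketch (not part of the original
        # module) of the extranodes format described above:
        #
        #   extranodes = {
        #       1: [(manifestnode, linknode)],      # key 1: manifest nodes
        #       'foo.txt': [(filenode, linknode)],  # filename: file nodes
        #   }
        #
        # where each linknode is the changelog node to be transmitted as
        # the linkrev for the paired node.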
1553
1549
1554 if extranodes is None:
1550 if extranodes is None:
1555 # can we go through the fast path ?
1551 # can we go through the fast path ?
1556 heads.sort()
1552 heads.sort()
1557 allheads = self.heads()
1553 allheads = self.heads()
1558 allheads.sort()
1554 allheads.sort()
1559 if heads == allheads:
1555 if heads == allheads:
1560 common = []
1556 common = []
1561 # parents of bases are known from both sides
1557 # parents of bases are known from both sides
1562 for n in bases:
1558 for n in bases:
1563 for p in self.changelog.parents(n):
1559 for p in self.changelog.parents(n):
1564 if p != nullid:
1560 if p != nullid:
1565 common.append(p)
1561 common.append(p)
1566 return self._changegroup(common, source)
1562 return self._changegroup(common, source)
1567
1563
1568 self.hook('preoutgoing', throw=True, source=source)
1564 self.hook('preoutgoing', throw=True, source=source)
1569
1565
1570 # Set up some initial variables
1566 # Set up some initial variables
1571 # Make it easy to refer to self.changelog
1567 # Make it easy to refer to self.changelog
1572 cl = self.changelog
1568 cl = self.changelog
1573 # msng is short for missing - compute the list of changesets in this
1569 # msng is short for missing - compute the list of changesets in this
1574 # changegroup.
1570 # changegroup.
1575 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1571 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1576 self.changegroupinfo(msng_cl_lst, source)
1572 self.changegroupinfo(msng_cl_lst, source)
1577 # Some bases may turn out to be superfluous, and some heads may be
1573 # Some bases may turn out to be superfluous, and some heads may be
1578 # too. nodesbetween will return the minimal set of bases and heads
1574 # too. nodesbetween will return the minimal set of bases and heads
1579 # necessary to re-create the changegroup.
1575 # necessary to re-create the changegroup.
1580
1576
1581 # Known heads are the list of heads that it is assumed the recipient
1577 # Known heads are the list of heads that it is assumed the recipient
1582 # of this changegroup will know about.
1578 # of this changegroup will know about.
1583 knownheads = {}
1579 knownheads = {}
1584 # We assume that all parents of bases are known heads.
1580 # We assume that all parents of bases are known heads.
1585 for n in bases:
1581 for n in bases:
1586 for p in cl.parents(n):
1582 for p in cl.parents(n):
1587 if p != nullid:
1583 if p != nullid:
1588 knownheads[p] = 1
1584 knownheads[p] = 1
1589 knownheads = knownheads.keys()
1585 knownheads = knownheads.keys()
1590 if knownheads:
1586 if knownheads:
1591 # Now that we know what heads are known, we can compute which
1587 # Now that we know what heads are known, we can compute which
1592 # changesets are known. The recipient must know about all
1588 # changesets are known. The recipient must know about all
1593 # changesets required to reach the known heads from the null
1589 # changesets required to reach the known heads from the null
1594 # changeset.
1590 # changeset.
1595 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1591 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1596 junk = None
1592 junk = None
1597 # Transform the list into a set.
1593 # Transform the list into a set.
1598 has_cl_set = set(has_cl_set)
1594 has_cl_set = set(has_cl_set)
1599 else:
1595 else:
1600 # If there were no known heads, the recipient cannot be assumed to
1596 # If there were no known heads, the recipient cannot be assumed to
1601 # know about any changesets.
1597 # know about any changesets.
1602 has_cl_set = set()
1598 has_cl_set = set()
1603
1599
1604 # Make it easy to refer to self.manifest
1600 # Make it easy to refer to self.manifest
1605 mnfst = self.manifest
1601 mnfst = self.manifest
1606 # We don't know which manifests are missing yet
1602 # We don't know which manifests are missing yet
1607 msng_mnfst_set = {}
1603 msng_mnfst_set = {}
1608 # Nor do we know which filenodes are missing.
1604 # Nor do we know which filenodes are missing.
1609 msng_filenode_set = {}
1605 msng_filenode_set = {}
1610
1606
1611 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1607 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1612 junk = None
1608 junk = None
1613
1609
1614 # A changeset always belongs to itself, so the changenode lookup
1610 # A changeset always belongs to itself, so the changenode lookup
1615 # function for a changenode is identity.
1611 # function for a changenode is identity.
1616 def identity(x):
1612 def identity(x):
1617 return x
1613 return x
1618
1614
1619 # A function generating function. Sets up an environment for the
1615 # A function generating function. Sets up an environment for the
1620 # inner function.
1616 # inner function.
1621 def cmp_by_rev_func(revlog):
1617 def cmp_by_rev_func(revlog):
1622 # Compare two nodes by their revision number in the environment's
1618 # Compare two nodes by their revision number in the environment's
1623 # revision history. Since the revision number both represents the
1619 # revision history. Since the revision number both represents the
1624 # most efficient order to read the nodes in, and represents a
1620 # most efficient order to read the nodes in, and represents a
1625 # topological sorting of the nodes, this function is often useful.
1621 # topological sorting of the nodes, this function is often useful.
1626 def cmp_by_rev(a, b):
1622 def cmp_by_rev(a, b):
1627 return cmp(revlog.rev(a), revlog.rev(b))
1623 return cmp(revlog.rev(a), revlog.rev(b))
1628 return cmp_by_rev
1624 return cmp_by_rev
1629
1625
1630 # If we determine that a particular file or manifest node must be a
1626 # If we determine that a particular file or manifest node must be a
1631 # node that the recipient of the changegroup will already have, we can
1627 # node that the recipient of the changegroup will already have, we can
1632 # also assume the recipient will have all the parents. This function
1628 # also assume the recipient will have all the parents. This function
1633 # prunes them from the set of missing nodes.
1629 # prunes them from the set of missing nodes.
1634 def prune_parents(revlog, hasset, msngset):
1630 def prune_parents(revlog, hasset, msngset):
1635 haslst = hasset.keys()
1631 haslst = hasset.keys()
1636 haslst.sort(cmp_by_rev_func(revlog))
1632 haslst.sort(cmp_by_rev_func(revlog))
1637 for node in haslst:
1633 for node in haslst:
1638 parentlst = [p for p in revlog.parents(node) if p != nullid]
1634 parentlst = [p for p in revlog.parents(node) if p != nullid]
1639 while parentlst:
1635 while parentlst:
1640 n = parentlst.pop()
1636 n = parentlst.pop()
1641 if n not in hasset:
1637 if n not in hasset:
1642 hasset[n] = 1
1638 hasset[n] = 1
1643 p = [p for p in revlog.parents(n) if p != nullid]
1639 p = [p for p in revlog.parents(n) if p != nullid]
1644 parentlst.extend(p)
1640 parentlst.extend(p)
1645 for n in hasset:
1641 for n in hasset:
1646 msngset.pop(n, None)
1642 msngset.pop(n, None)
1647
1643
1648 # This is a function generating function used to set up an environment
1644 # This is a function generating function used to set up an environment
1649 # for the inner function to execute in.
1645 # for the inner function to execute in.
1650 def manifest_and_file_collector(changedfileset):
1646 def manifest_and_file_collector(changedfileset):
1651 # This is an information gathering function that gathers
1647 # This is an information gathering function that gathers
1652 # information from each changeset node that goes out as part of
1648 # information from each changeset node that goes out as part of
1653 # the changegroup. The information gathered is a list of which
1649 # the changegroup. The information gathered is a list of which
1654 # manifest nodes are potentially required (the recipient may
1650 # manifest nodes are potentially required (the recipient may
1655 # already have them) and total list of all files which were
1651 # already have them) and total list of all files which were
1656 # changed in any changeset in the changegroup.
1652 # changed in any changeset in the changegroup.
1657 #
1653 #
1658 # We also remember the first changenode we saw any manifest
1654 # We also remember the first changenode we saw any manifest
1659 # referenced by so we can later determine which changenode 'owns'
1655 # referenced by so we can later determine which changenode 'owns'
1660 # the manifest.
1656 # the manifest.
1661 def collect_manifests_and_files(clnode):
1657 def collect_manifests_and_files(clnode):
1662 c = cl.read(clnode)
1658 c = cl.read(clnode)
1663 for f in c[3]:
1659 for f in c[3]:
1664 # This is to make sure we only have one instance of each
1660 # This is to make sure we only have one instance of each
1665 # filename string for each filename.
1661 # filename string for each filename.
1666 changedfileset.setdefault(f, f)
1662 changedfileset.setdefault(f, f)
1667 msng_mnfst_set.setdefault(c[0], clnode)
1663 msng_mnfst_set.setdefault(c[0], clnode)
1668 return collect_manifests_and_files
1664 return collect_manifests_and_files
1669
1665
1670 # Figure out which manifest nodes (of the ones we think might be part
1666 # Figure out which manifest nodes (of the ones we think might be part
1671 # of the changegroup) the recipient must know about and remove them
1667 # of the changegroup) the recipient must know about and remove them
1672 # from the changegroup.
1668 # from the changegroup.
1673 def prune_manifests():
1669 def prune_manifests():
1674 has_mnfst_set = {}
1670 has_mnfst_set = {}
1675 for n in msng_mnfst_set:
1671 for n in msng_mnfst_set:
1676 # If a 'missing' manifest thinks it belongs to a changenode
1672 # If a 'missing' manifest thinks it belongs to a changenode
1677 # the recipient is assumed to have, obviously the recipient
1673 # the recipient is assumed to have, obviously the recipient
1678 # must have that manifest.
1674 # must have that manifest.
1679 linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
1675 linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
1680 if linknode in has_cl_set:
1676 if linknode in has_cl_set:
1681 has_mnfst_set[n] = 1
1677 has_mnfst_set[n] = 1
1682 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1678 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1683
1679
1684 # Use the information collected in collect_manifests_and_files to say
1680 # Use the information collected in collect_manifests_and_files to say
1685 # which changenode any manifestnode belongs to.
1681 # which changenode any manifestnode belongs to.
1686 def lookup_manifest_link(mnfstnode):
1682 def lookup_manifest_link(mnfstnode):
1687 return msng_mnfst_set[mnfstnode]
1683 return msng_mnfst_set[mnfstnode]
1688
1684
1689 # A function generating function that sets up the initial environment
1685 # A function generating function that sets up the initial environment
1690 # the inner function.
1686 # the inner function.
1691 def filenode_collector(changedfiles):
1687 def filenode_collector(changedfiles):
1692 next_rev = [0]
1688 next_rev = [0]
1693 # This gathers information from each manifestnode included in the
1689 # This gathers information from each manifestnode included in the
1694 # changegroup about which filenodes the manifest node references
1690 # changegroup about which filenodes the manifest node references
1695 # so we can include those in the changegroup too.
1691 # so we can include those in the changegroup too.
1696 #
1692 #
1697 # It also remembers which changenode each filenode belongs to. It
1693 # It also remembers which changenode each filenode belongs to. It
1698 # does this by assuming the a filenode belongs to the changenode
1694 # does this by assuming the a filenode belongs to the changenode
1699 # the first manifest that references it belongs to.
1695 # the first manifest that references it belongs to.
1700 def collect_msng_filenodes(mnfstnode):
1696 def collect_msng_filenodes(mnfstnode):
1701 r = mnfst.rev(mnfstnode)
1697 r = mnfst.rev(mnfstnode)
1702 if r == next_rev[0]:
1698 if r == next_rev[0]:
1703 # If the last rev we looked at was the one just previous,
1699 # If the last rev we looked at was the one just previous,
1704 # we only need to see a diff.
1700 # we only need to see a diff.
1705 deltamf = mnfst.readdelta(mnfstnode)
1701 deltamf = mnfst.readdelta(mnfstnode)
1706 # For each line in the delta
1702 # For each line in the delta
1707 for f, fnode in deltamf.iteritems():
1703 for f, fnode in deltamf.iteritems():
1708 f = changedfiles.get(f, None)
1704 f = changedfiles.get(f, None)
1709 # And if the file is in the list of files we care
1705 # And if the file is in the list of files we care
1710 # about.
1706 # about.
1711 if f is not None:
1707 if f is not None:
1712 # Get the changenode this manifest belongs to
1708 # Get the changenode this manifest belongs to
1713 clnode = msng_mnfst_set[mnfstnode]
1709 clnode = msng_mnfst_set[mnfstnode]
1714 # Create the set of filenodes for the file if
1710 # Create the set of filenodes for the file if
1715 # there isn't one already.
1711 # there isn't one already.
1716 ndset = msng_filenode_set.setdefault(f, {})
1712 ndset = msng_filenode_set.setdefault(f, {})
1717 # And set the filenode's changelog node to the
1713 # And set the filenode's changelog node to the
1718 # manifest's if it hasn't been set already.
1714 # manifest's if it hasn't been set already.
1719 ndset.setdefault(fnode, clnode)
1715 ndset.setdefault(fnode, clnode)
1720 else:
1716 else:
1721 # Otherwise we need a full manifest.
1717 # Otherwise we need a full manifest.
1722 m = mnfst.read(mnfstnode)
1718 m = mnfst.read(mnfstnode)
1723 # For every file in we care about.
1719 # For every file in we care about.
1724 for f in changedfiles:
1720 for f in changedfiles:
1725 fnode = m.get(f, None)
1721 fnode = m.get(f, None)
1726 # If it's in the manifest
1722 # If it's in the manifest
1727 if fnode is not None:
1723 if fnode is not None:
1728 # See comments above.
1724 # See comments above.
1729 clnode = msng_mnfst_set[mnfstnode]
1725 clnode = msng_mnfst_set[mnfstnode]
1730 ndset = msng_filenode_set.setdefault(f, {})
1726 ndset = msng_filenode_set.setdefault(f, {})
1731 ndset.setdefault(fnode, clnode)
1727 ndset.setdefault(fnode, clnode)
1732 # Remember the revision we hope to see next.
1728 # Remember the revision we hope to see next.
1733 next_rev[0] = r + 1
1729 next_rev[0] = r + 1
1734 return collect_msng_filenodes
1730 return collect_msng_filenodes
1735
1731
1736 # We have a list of filenodes we think we need for a file, lets remove
1732 # We have a list of filenodes we think we need for a file, lets remove
1737 # all those we know the recipient must have.
1733 # all those we know the recipient must have.
1738 def prune_filenodes(f, filerevlog):
1734 def prune_filenodes(f, filerevlog):
1739 msngset = msng_filenode_set[f]
1735 msngset = msng_filenode_set[f]
1740 hasset = {}
1736 hasset = {}
1741 # If a 'missing' filenode thinks it belongs to a changenode we
1737 # If a 'missing' filenode thinks it belongs to a changenode we
1742 # assume the recipient must have, then the recipient must have
1738 # assume the recipient must have, then the recipient must have
1743 # that filenode.
1739 # that filenode.
1744 for n in msngset:
1740 for n in msngset:
1745 clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
1741 clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
1746 if clnode in has_cl_set:
1742 if clnode in has_cl_set:
1747 hasset[n] = 1
1743 hasset[n] = 1
1748 prune_parents(filerevlog, hasset, msngset)
1744 prune_parents(filerevlog, hasset, msngset)
1749
1745
1750 # A function generator function that sets up the a context for the
1746 # A function generator function that sets up the a context for the
1751 # inner function.
1747 # inner function.
1752 def lookup_filenode_link_func(fname):
1748 def lookup_filenode_link_func(fname):
1753 msngset = msng_filenode_set[fname]
1749 msngset = msng_filenode_set[fname]
1754 # Lookup the changenode the filenode belongs to.
1750 # Lookup the changenode the filenode belongs to.
1755 def lookup_filenode_link(fnode):
1751 def lookup_filenode_link(fnode):
1756 return msngset[fnode]
1752 return msngset[fnode]
1757 return lookup_filenode_link
1753 return lookup_filenode_link
1758
1754
1759 # Add the nodes that were explicitly requested.
1755 # Add the nodes that were explicitly requested.
1760 def add_extra_nodes(name, nodes):
1756 def add_extra_nodes(name, nodes):
1761 if not extranodes or name not in extranodes:
1757 if not extranodes or name not in extranodes:
1762 return
1758 return
1763
1759
1764 for node, linknode in extranodes[name]:
1760 for node, linknode in extranodes[name]:
1765 if node not in nodes:
1761 if node not in nodes:
1766 nodes[node] = linknode
1762 nodes[node] = linknode
1767
1763
1768 # Now that we have all theses utility functions to help out and
1764 # Now that we have all theses utility functions to help out and
1769 # logically divide up the task, generate the group.
1765 # logically divide up the task, generate the group.
1770 def gengroup():
1766 def gengroup():
1771 # The set of changed files starts empty.
1767 # The set of changed files starts empty.
1772 changedfiles = {}
1768 changedfiles = {}
1773 # Create a changenode group generator that will call our functions
1769 # Create a changenode group generator that will call our functions
1774 # back to lookup the owning changenode and collect information.
1770 # back to lookup the owning changenode and collect information.
1775 group = cl.group(msng_cl_lst, identity,
1771 group = cl.group(msng_cl_lst, identity,
1776 manifest_and_file_collector(changedfiles))
1772 manifest_and_file_collector(changedfiles))
1777 for chnk in group:
1773 for chnk in group:
1778 yield chnk
1774 yield chnk
1779
1775
1780 # The list of manifests has been collected by the generator
1776 # The list of manifests has been collected by the generator
1781 # calling our functions back.
1777 # calling our functions back.
1782 prune_manifests()
1778 prune_manifests()
1783 add_extra_nodes(1, msng_mnfst_set)
1779 add_extra_nodes(1, msng_mnfst_set)
1784 msng_mnfst_lst = msng_mnfst_set.keys()
1780 msng_mnfst_lst = msng_mnfst_set.keys()
1785 # Sort the manifestnodes by revision number.
1781 # Sort the manifestnodes by revision number.
1786 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1782 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1787 # Create a generator for the manifestnodes that calls our lookup
1783 # Create a generator for the manifestnodes that calls our lookup
1788 # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            if extranodes:
                for fname in extranodes:
                    if isinstance(fname, int):
                        continue
                    msng_filenode_set.setdefault(fname, {})
                    changedfiles[fname] = 1
            # Go through all our files in order sorted by name.
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if fname in msng_filenode_set:
                    prune_filenodes(fname, filerevlog)
                    add_extra_nodes(fname, msng_filenode_set[fname])
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if fname in msng_filenode_set:
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

            if msng_cl_lst:
                self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())
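# A minimal consumer-side sketch (read_chunks is a hypothetical helper, not
# part of localrepo.py) of how the chunk stream built above can be walked,
# assuming the v1 framing produced by chunkheader()/closechunk(): each chunk
# is prefixed with a 4-byte big-endian length that counts the prefix itself,
# and a length of zero (or anything <= 4) terminates the current group.
import struct

def read_chunks(fp):
    # yield raw chunk payloads until a terminating (empty) chunk
    while True:
        header = fp.read(4)
        if len(header) < 4:
            break                          # truncated stream
        length = struct.unpack(">l", header)[0]
        if length <= 4:
            break                          # closechunk(): end of this group
        yield fp.read(length - 4)          # payload excludes the prefix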

    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    def _changegroup(self, common, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        common is the set of common nodes between remote and self"""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        nodes = cl.findmissing(common)
        revset = set([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes, source)

        def identity(x):
            return x

        def gennodelst(log):
            for r in log:
                if log.linkrev(r) in revset:
                    yield log.node(r)

        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(revlog.rev(n)))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())
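# A minimal sketch (ChunkBufferSketch is hypothetical, not Mercurial's
# util.chunkbuffer) of the pattern both generators above rely on: wrapping
# a generator of byte chunks in a file-like object so callers can read(n)
# bytes from a lazily produced stream.
class ChunkBufferSketch(object):
    def __init__(self, gen):
        self._iter = iter(gen)
        self._buf = ''

    def read(self, n):
        # pull chunks from the generator until n bytes are buffered
        while len(self._buf) < n:
            try:
                self._buf += self._iter.next()   # Python 2, as in this module
            except StopIteration:
                break
        data, self._buf = self._buf[:n], self._buf[n:]
        return data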

    def addchangegroup(self, source, srctype, url, emptyok=False):
        """add a changegroup to the repo.

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug(_("add changeset %s\n") % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        # write changelog data to temp files so concurrent readers will not
        # see an inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = len(cl.heads())

        tr = self.transaction()
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for an empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, trp)

            # process the files
            self.ui.status(_("adding file changes\n"))
            while 1:
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = len(fl)
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1

            newheads = len(cl.heads())
            heads = ""
            if oldheads and newheads != oldheads:
                heads = _(" (%+d heads)") % (newheads - oldheads)

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, heads))

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            # make the changelog see real files again
            cl.finalize(trp)

            tr.close()
        finally:
            del tr

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug(_("updating the branch cache\n"))
            self.branchtags()
            self.hook("changegroup", node=hex(cl.node(clstart)),
                      source=srctype, url=url)

            for i in xrange(clstart, clend):
                self.hook("incoming", node=hex(cl.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1
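# A minimal caller-side sketch (describe_heads_delta is a hypothetical
# helper) decoding addchangegroup()'s return value per the docstring above.
def describe_heads_delta(ret):
    if ret == 0:
        return 'nothing changed'
    if ret > 1:
        return '%d head(s) added' % (ret - 1)      # 1 + added heads
    if ret < 0:
        return '%d head(s) removed' % (-ret - 1)   # -1 - removed heads
    return 'head count unchanged'                  # ret == 1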


    def stream_in(self, remote):
        fp = remote.stream_out()
        l = fp.readline()
        try:
            resp = int(l)
        except ValueError:
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        if resp == 1:
            raise util.Abort(_('operation forbidden by server'))
        elif resp == 2:
            raise util.Abort(_('locking the remote repository failed'))
        elif resp != 0:
            raise util.Abort(_('the server sent an unknown error code'))
        self.ui.status(_('streaming all changes\n'))
        l = fp.readline()
        try:
            total_files, total_bytes = map(int, l.split(' ', 1))
        except (ValueError, TypeError):
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        self.ui.status(_('%d files to transfer, %s of data\n') %
                       (total_files, util.bytecount(total_bytes)))
        start = time.time()
        for i in xrange(total_files):
            # XXX doesn't support '\n' or '\r' in filenames
            l = fp.readline()
            try:
                name, size = l.split('\0', 1)
                size = int(size)
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            self.ui.debug(_('adding %s (%s)\n') % (name, util.bytecount(size)))
            ofp = self.sopener(name, 'w')
            for chunk in util.filechunkiter(fp, limit=size):
                ofp.write(chunk)
            ofp.close()
        elapsed = time.time() - start
        if elapsed <= 0:
            elapsed = 0.001
        self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(total_bytes), elapsed,
                        util.bytecount(total_bytes / elapsed)))
        self.invalidate()
        return len(self.heads()) + 1
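# A minimal sketch of the wire format stream_in() parses above, written as
# a hypothetical server-side emitter (emit_stream and its arguments are
# assumptions, not part of Mercurial's API):
#   line 1: an integer status code ('0' means OK)
#   line 2: '<total_files> <total_bytes>'
#   per file: '<name>\0<size>\n' followed by exactly <size> raw bytes
def emit_stream(fp, entries):
    # entries: iterable of (name, size, data) triples
    fp.write('0\n')
    total_bytes = sum(size for name, size, data in entries)
    fp.write('%d %d\n' % (len(entries), total_bytes))
    for name, size, data in entries:
        fp.write('%s\0%d\n' % (name, size))
        fp.write(data)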

    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if the revlog format changes, the client will have to check the
        # version and format flags on the "stream" capability and use
        # uncompressed only if compatible.

        if stream and not heads and remote.capable('stream'):
            return self.stream_in(remote)
        return self.pull(remote, heads)

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a
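# An illustrative use of aftertrans() (the paths here are made up): the
# returned closure captures only copied (src, dest) tuples, never the
# repository object, so handing it to a transaction creates no reference
# cycle and destructors can still run.
undo_renames = aftertrans([('journal', 'undo'),
                           ('journal.branch', 'undo.branch')])
# no rename happens until the closure is invoked, e.g. after the
# transaction completes:
# undo_renames()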

def instance(ui, path, create):
    return localrepository(ui, util.drop_scheme('file', path), create)

def islocal(path):
    return True