commit: rename wctx to cctx
Matt Mackall
r8712:dd3ebf81 default
@@ -1,2154 +1,2154 @@
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2, incorporated herein by reference.
6 # GNU General Public License version 2, incorporated herein by reference.
7
7
8 from node import bin, hex, nullid, nullrev, short
8 from node import bin, hex, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import repo, changegroup
10 import repo, changegroup
11 import changelog, dirstate, filelog, manifest, context
11 import changelog, dirstate, filelog, manifest, context
12 import lock, transaction, store, encoding
12 import lock, transaction, store, encoding
13 import util, extensions, hook, error
13 import util, extensions, hook, error
14 import match as match_
14 import match as match_
15 import merge as merge_
15 import merge as merge_
16 from lock import release
16 from lock import release
17 import weakref, stat, errno, os, time, inspect, bisect
17 import weakref, stat, errno, os, time, inspect, bisect
18 propertycache = util.propertycache
18 propertycache = util.propertycache
19
19
20 class localrepository(repo.repository):
20 class localrepository(repo.repository):
21 capabilities = set(('lookup', 'changegroupsubset', 'branchmap'))
21 capabilities = set(('lookup', 'changegroupsubset', 'branchmap'))
22 supported = set('revlogv1 store fncache'.split())
22 supported = set('revlogv1 store fncache'.split())
23
23
24 def __init__(self, baseui, path=None, create=0):
24 def __init__(self, baseui, path=None, create=0):
25 repo.repository.__init__(self)
25 repo.repository.__init__(self)
26 self.root = os.path.realpath(path)
26 self.root = os.path.realpath(path)
27 self.path = os.path.join(self.root, ".hg")
27 self.path = os.path.join(self.root, ".hg")
28 self.origroot = path
28 self.origroot = path
29 self.opener = util.opener(self.path)
29 self.opener = util.opener(self.path)
30 self.wopener = util.opener(self.root)
30 self.wopener = util.opener(self.root)
31
31
32 if not os.path.isdir(self.path):
32 if not os.path.isdir(self.path):
33 if create:
33 if create:
34 if not os.path.exists(path):
34 if not os.path.exists(path):
35 os.mkdir(path)
35 os.mkdir(path)
36 os.mkdir(self.path)
36 os.mkdir(self.path)
37 requirements = ["revlogv1"]
37 requirements = ["revlogv1"]
38 if baseui.configbool('format', 'usestore', True):
38 if baseui.configbool('format', 'usestore', True):
39 os.mkdir(os.path.join(self.path, "store"))
39 os.mkdir(os.path.join(self.path, "store"))
40 requirements.append("store")
40 requirements.append("store")
41 if baseui.configbool('format', 'usefncache', True):
41 if baseui.configbool('format', 'usefncache', True):
42 requirements.append("fncache")
42 requirements.append("fncache")
43 # create an invalid changelog
43 # create an invalid changelog
44 self.opener("00changelog.i", "a").write(
44 self.opener("00changelog.i", "a").write(
45 '\0\0\0\2' # represents revlogv2
45 '\0\0\0\2' # represents revlogv2
46 ' dummy changelog to prevent using the old repo layout'
46 ' dummy changelog to prevent using the old repo layout'
47 )
47 )
48 reqfile = self.opener("requires", "w")
48 reqfile = self.opener("requires", "w")
49 for r in requirements:
49 for r in requirements:
50 reqfile.write("%s\n" % r)
50 reqfile.write("%s\n" % r)
51 reqfile.close()
51 reqfile.close()
52 else:
52 else:
53 raise error.RepoError(_("repository %s not found") % path)
53 raise error.RepoError(_("repository %s not found") % path)
54 elif create:
54 elif create:
55 raise error.RepoError(_("repository %s already exists") % path)
55 raise error.RepoError(_("repository %s already exists") % path)
56 else:
56 else:
57 # find requirements
57 # find requirements
58 requirements = set()
58 requirements = set()
59 try:
59 try:
60 requirements = set(self.opener("requires").read().splitlines())
60 requirements = set(self.opener("requires").read().splitlines())
61 except IOError, inst:
61 except IOError, inst:
62 if inst.errno != errno.ENOENT:
62 if inst.errno != errno.ENOENT:
63 raise
63 raise
64 for r in requirements - self.supported:
64 for r in requirements - self.supported:
65 raise error.RepoError(_("requirement '%s' not supported") % r)
65 raise error.RepoError(_("requirement '%s' not supported") % r)
66
66
67 self.store = store.store(requirements, self.path, util.opener)
67 self.store = store.store(requirements, self.path, util.opener)
68 self.spath = self.store.path
68 self.spath = self.store.path
69 self.sopener = self.store.opener
69 self.sopener = self.store.opener
70 self.sjoin = self.store.join
70 self.sjoin = self.store.join
71 self.opener.createmode = self.store.createmode
71 self.opener.createmode = self.store.createmode
72
72
73 self.baseui = baseui
73 self.baseui = baseui
74 self.ui = baseui.copy()
74 self.ui = baseui.copy()
75 try:
75 try:
76 self.ui.readconfig(self.join("hgrc"), self.root)
76 self.ui.readconfig(self.join("hgrc"), self.root)
77 extensions.loadall(self.ui)
77 extensions.loadall(self.ui)
78 except IOError:
78 except IOError:
79 pass
79 pass
80
80
81 self.tagscache = None
81 self.tagscache = None
82 self._tagstypecache = None
82 self._tagstypecache = None
83 self.branchcache = None
83 self.branchcache = None
84 self._ubranchcache = None # UTF-8 version of branchcache
84 self._ubranchcache = None # UTF-8 version of branchcache
85 self._branchcachetip = None
85 self._branchcachetip = None
86 self.nodetagscache = None
86 self.nodetagscache = None
87 self.filterpats = {}
87 self.filterpats = {}
88 self._datafilters = {}
88 self._datafilters = {}
89 self._transref = self._lockref = self._wlockref = None
89 self._transref = self._lockref = self._wlockref = None
90
90
91 @propertycache
91 @propertycache
92 def changelog(self):
92 def changelog(self):
93 c = changelog.changelog(self.sopener)
93 c = changelog.changelog(self.sopener)
94 if 'HG_PENDING' in os.environ:
94 if 'HG_PENDING' in os.environ:
95 p = os.environ['HG_PENDING']
95 p = os.environ['HG_PENDING']
96 if p.startswith(self.root):
96 if p.startswith(self.root):
97 c.readpending('00changelog.i.a')
97 c.readpending('00changelog.i.a')
98 self.sopener.defversion = c.version
98 self.sopener.defversion = c.version
99 return c
99 return c
100
100
101 @propertycache
101 @propertycache
102 def manifest(self):
102 def manifest(self):
103 return manifest.manifest(self.sopener)
103 return manifest.manifest(self.sopener)
104
104
105 @propertycache
105 @propertycache
106 def dirstate(self):
106 def dirstate(self):
107 return dirstate.dirstate(self.opener, self.ui, self.root)
107 return dirstate.dirstate(self.opener, self.ui, self.root)
108
108
109 def __getitem__(self, changeid):
109 def __getitem__(self, changeid):
110 if changeid is None:
110 if changeid is None:
111 return context.workingctx(self)
111 return context.workingctx(self)
112 return context.changectx(self, changeid)
112 return context.changectx(self, changeid)
113
113
114 def __nonzero__(self):
114 def __nonzero__(self):
115 return True
115 return True
116
116
117 def __len__(self):
117 def __len__(self):
118 return len(self.changelog)
118 return len(self.changelog)
119
119
120 def __iter__(self):
120 def __iter__(self):
121 for i in xrange(len(self)):
121 for i in xrange(len(self)):
122 yield i
122 yield i
123
123
124 def url(self):
124 def url(self):
125 return 'file:' + self.root
125 return 'file:' + self.root
126
126
127 def hook(self, name, throw=False, **args):
127 def hook(self, name, throw=False, **args):
128 return hook.hook(self.ui, self, name, throw, **args)
128 return hook.hook(self.ui, self, name, throw, **args)
129
129
130 tag_disallowed = ':\r\n'
130 tag_disallowed = ':\r\n'
131
131
132 def _tag(self, names, node, message, local, user, date, extra={}):
132 def _tag(self, names, node, message, local, user, date, extra={}):
133 if isinstance(names, str):
133 if isinstance(names, str):
134 allchars = names
134 allchars = names
135 names = (names,)
135 names = (names,)
136 else:
136 else:
137 allchars = ''.join(names)
137 allchars = ''.join(names)
138 for c in self.tag_disallowed:
138 for c in self.tag_disallowed:
139 if c in allchars:
139 if c in allchars:
140 raise util.Abort(_('%r cannot be used in a tag name') % c)
140 raise util.Abort(_('%r cannot be used in a tag name') % c)
141
141
142 for name in names:
142 for name in names:
143 self.hook('pretag', throw=True, node=hex(node), tag=name,
143 self.hook('pretag', throw=True, node=hex(node), tag=name,
144 local=local)
144 local=local)
145
145
146 def writetags(fp, names, munge, prevtags):
146 def writetags(fp, names, munge, prevtags):
147 fp.seek(0, 2)
147 fp.seek(0, 2)
148 if prevtags and prevtags[-1] != '\n':
148 if prevtags and prevtags[-1] != '\n':
149 fp.write('\n')
149 fp.write('\n')
150 for name in names:
150 for name in names:
151 m = munge and munge(name) or name
151 m = munge and munge(name) or name
152 if self._tagstypecache and name in self._tagstypecache:
152 if self._tagstypecache and name in self._tagstypecache:
153 old = self.tagscache.get(name, nullid)
153 old = self.tagscache.get(name, nullid)
154 fp.write('%s %s\n' % (hex(old), m))
154 fp.write('%s %s\n' % (hex(old), m))
155 fp.write('%s %s\n' % (hex(node), m))
155 fp.write('%s %s\n' % (hex(node), m))
156 fp.close()
156 fp.close()
157
157
158 prevtags = ''
158 prevtags = ''
159 if local:
159 if local:
160 try:
160 try:
161 fp = self.opener('localtags', 'r+')
161 fp = self.opener('localtags', 'r+')
162 except IOError:
162 except IOError:
163 fp = self.opener('localtags', 'a')
163 fp = self.opener('localtags', 'a')
164 else:
164 else:
165 prevtags = fp.read()
165 prevtags = fp.read()
166
166
167 # local tags are stored in the current charset
167 # local tags are stored in the current charset
168 writetags(fp, names, None, prevtags)
168 writetags(fp, names, None, prevtags)
169 for name in names:
169 for name in names:
170 self.hook('tag', node=hex(node), tag=name, local=local)
170 self.hook('tag', node=hex(node), tag=name, local=local)
171 return
171 return
172
172
173 try:
173 try:
174 fp = self.wfile('.hgtags', 'rb+')
174 fp = self.wfile('.hgtags', 'rb+')
175 except IOError:
175 except IOError:
176 fp = self.wfile('.hgtags', 'ab')
176 fp = self.wfile('.hgtags', 'ab')
177 else:
177 else:
178 prevtags = fp.read()
178 prevtags = fp.read()
179
179
180 # committed tags are stored in UTF-8
180 # committed tags are stored in UTF-8
181 writetags(fp, names, encoding.fromlocal, prevtags)
181 writetags(fp, names, encoding.fromlocal, prevtags)
182
182
183 if '.hgtags' not in self.dirstate:
183 if '.hgtags' not in self.dirstate:
184 self.add(['.hgtags'])
184 self.add(['.hgtags'])
185
185
186 m = match_.exact(self.root, '', ['.hgtags'])
186 m = match_.exact(self.root, '', ['.hgtags'])
187 tagnode = self.commit(message, user, date, extra=extra, match=m)
187 tagnode = self.commit(message, user, date, extra=extra, match=m)
188
188
189 for name in names:
189 for name in names:
190 self.hook('tag', node=hex(node), tag=name, local=local)
190 self.hook('tag', node=hex(node), tag=name, local=local)
191
191
192 return tagnode
192 return tagnode
193
193
194 def tag(self, names, node, message, local, user, date):
194 def tag(self, names, node, message, local, user, date):
195 '''tag a revision with one or more symbolic names.
195 '''tag a revision with one or more symbolic names.
196
196
197 names is a list of strings or, when adding a single tag, names may be a
197 names is a list of strings or, when adding a single tag, names may be a
198 string.
198 string.
199
199
200 if local is True, the tags are stored in a per-repository file.
200 if local is True, the tags are stored in a per-repository file.
201 otherwise, they are stored in the .hgtags file, and a new
201 otherwise, they are stored in the .hgtags file, and a new
202 changeset is committed with the change.
202 changeset is committed with the change.
203
203
204 keyword arguments:
204 keyword arguments:
205
205
206 local: whether to store tags in non-version-controlled file
206 local: whether to store tags in non-version-controlled file
207 (default False)
207 (default False)
208
208
209 message: commit message to use if committing
209 message: commit message to use if committing
210
210
211 user: name of user to use if committing
211 user: name of user to use if committing
212
212
213 date: date tuple to use if committing'''
213 date: date tuple to use if committing'''
214
214
215 for x in self.status()[:5]:
215 for x in self.status()[:5]:
216 if '.hgtags' in x:
216 if '.hgtags' in x:
217 raise util.Abort(_('working copy of .hgtags is changed '
217 raise util.Abort(_('working copy of .hgtags is changed '
218 '(please commit .hgtags manually)'))
218 '(please commit .hgtags manually)'))
219
219
220 self.tags() # instantiate the cache
220 self.tags() # instantiate the cache
221 self._tag(names, node, message, local, user, date)
221 self._tag(names, node, message, local, user, date)
222
222
223 def tags(self):
223 def tags(self):
224 '''return a mapping of tag to node'''
224 '''return a mapping of tag to node'''
225 if self.tagscache:
225 if self.tagscache:
226 return self.tagscache
226 return self.tagscache
227
227
228 globaltags = {}
228 globaltags = {}
229 tagtypes = {}
229 tagtypes = {}
230
230
231 def readtags(lines, fn, tagtype):
231 def readtags(lines, fn, tagtype):
232 filetags = {}
232 filetags = {}
233 count = 0
233 count = 0
234
234
235 def warn(msg):
235 def warn(msg):
236 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
236 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
237
237
238 for l in lines:
238 for l in lines:
239 count += 1
239 count += 1
240 if not l:
240 if not l:
241 continue
241 continue
242 s = l.split(" ", 1)
242 s = l.split(" ", 1)
243 if len(s) != 2:
243 if len(s) != 2:
244 warn(_("cannot parse entry"))
244 warn(_("cannot parse entry"))
245 continue
245 continue
246 node, key = s
246 node, key = s
247 key = encoding.tolocal(key.strip()) # stored in UTF-8
247 key = encoding.tolocal(key.strip()) # stored in UTF-8
248 try:
248 try:
249 bin_n = bin(node)
249 bin_n = bin(node)
250 except TypeError:
250 except TypeError:
251 warn(_("node '%s' is not well formed") % node)
251 warn(_("node '%s' is not well formed") % node)
252 continue
252 continue
253 if bin_n not in self.changelog.nodemap:
253 if bin_n not in self.changelog.nodemap:
254 warn(_("tag '%s' refers to unknown node") % key)
254 warn(_("tag '%s' refers to unknown node") % key)
255 continue
255 continue
256
256
257 h = []
257 h = []
258 if key in filetags:
258 if key in filetags:
259 n, h = filetags[key]
259 n, h = filetags[key]
260 h.append(n)
260 h.append(n)
261 filetags[key] = (bin_n, h)
261 filetags[key] = (bin_n, h)
262
262
263 for k, nh in filetags.iteritems():
263 for k, nh in filetags.iteritems():
264 if k not in globaltags:
264 if k not in globaltags:
265 globaltags[k] = nh
265 globaltags[k] = nh
266 tagtypes[k] = tagtype
266 tagtypes[k] = tagtype
267 continue
267 continue
268
268
269 # we prefer the global tag if:
269 # we prefer the global tag if:
270 # it supercedes us OR
270 # it supercedes us OR
271 # mutual supercedes and it has a higher rank
271 # mutual supercedes and it has a higher rank
272 # otherwise we win because we're tip-most
272 # otherwise we win because we're tip-most
273 an, ah = nh
273 an, ah = nh
274 bn, bh = globaltags[k]
274 bn, bh = globaltags[k]
275 if (bn != an and an in bh and
275 if (bn != an and an in bh and
276 (bn not in ah or len(bh) > len(ah))):
276 (bn not in ah or len(bh) > len(ah))):
277 an = bn
277 an = bn
278 ah.extend([n for n in bh if n not in ah])
278 ah.extend([n for n in bh if n not in ah])
279 globaltags[k] = an, ah
279 globaltags[k] = an, ah
280 tagtypes[k] = tagtype
280 tagtypes[k] = tagtype
281
281
282 # read the tags file from each head, ending with the tip
282 # read the tags file from each head, ending with the tip
283 f = None
283 f = None
284 for rev, node, fnode in self._hgtagsnodes():
284 for rev, node, fnode in self._hgtagsnodes():
285 f = (f and f.filectx(fnode) or
285 f = (f and f.filectx(fnode) or
286 self.filectx('.hgtags', fileid=fnode))
286 self.filectx('.hgtags', fileid=fnode))
287 readtags(f.data().splitlines(), f, "global")
287 readtags(f.data().splitlines(), f, "global")
288
288
289 try:
289 try:
290 data = encoding.fromlocal(self.opener("localtags").read())
290 data = encoding.fromlocal(self.opener("localtags").read())
291 # localtags are stored in the local character set
291 # localtags are stored in the local character set
292 # while the internal tag table is stored in UTF-8
292 # while the internal tag table is stored in UTF-8
293 readtags(data.splitlines(), "localtags", "local")
293 readtags(data.splitlines(), "localtags", "local")
294 except IOError:
294 except IOError:
295 pass
295 pass
296
296
297 self.tagscache = {}
297 self.tagscache = {}
298 self._tagstypecache = {}
298 self._tagstypecache = {}
299 for k, nh in globaltags.iteritems():
299 for k, nh in globaltags.iteritems():
300 n = nh[0]
300 n = nh[0]
301 if n != nullid:
301 if n != nullid:
302 self.tagscache[k] = n
302 self.tagscache[k] = n
303 self._tagstypecache[k] = tagtypes[k]
303 self._tagstypecache[k] = tagtypes[k]
304 self.tagscache['tip'] = self.changelog.tip()
304 self.tagscache['tip'] = self.changelog.tip()
305 return self.tagscache
305 return self.tagscache
306
306
307 def tagtype(self, tagname):
307 def tagtype(self, tagname):
308 '''
308 '''
309 return the type of the given tag. result can be:
309 return the type of the given tag. result can be:
310
310
311 'local' : a local tag
311 'local' : a local tag
312 'global' : a global tag
312 'global' : a global tag
313 None : tag does not exist
313 None : tag does not exist
314 '''
314 '''
315
315
316 self.tags()
316 self.tags()
317
317
318 return self._tagstypecache.get(tagname)
318 return self._tagstypecache.get(tagname)
319
319
320 def _hgtagsnodes(self):
320 def _hgtagsnodes(self):
321 last = {}
321 last = {}
322 ret = []
322 ret = []
323 for node in reversed(self.heads()):
323 for node in reversed(self.heads()):
324 c = self[node]
324 c = self[node]
325 rev = c.rev()
325 rev = c.rev()
326 try:
326 try:
327 fnode = c.filenode('.hgtags')
327 fnode = c.filenode('.hgtags')
328 except error.LookupError:
328 except error.LookupError:
329 continue
329 continue
330 ret.append((rev, node, fnode))
330 ret.append((rev, node, fnode))
331 if fnode in last:
331 if fnode in last:
332 ret[last[fnode]] = None
332 ret[last[fnode]] = None
333 last[fnode] = len(ret) - 1
333 last[fnode] = len(ret) - 1
334 return [item for item in ret if item]
334 return [item for item in ret if item]
335
335
336 def tagslist(self):
336 def tagslist(self):
337 '''return a list of tags ordered by revision'''
337 '''return a list of tags ordered by revision'''
338 l = []
338 l = []
339 for t, n in self.tags().iteritems():
339 for t, n in self.tags().iteritems():
340 try:
340 try:
341 r = self.changelog.rev(n)
341 r = self.changelog.rev(n)
342 except:
342 except:
343 r = -2 # sort to the beginning of the list if unknown
343 r = -2 # sort to the beginning of the list if unknown
344 l.append((r, t, n))
344 l.append((r, t, n))
345 return [(t, n) for r, t, n in sorted(l)]
345 return [(t, n) for r, t, n in sorted(l)]
346
346
347 def nodetags(self, node):
347 def nodetags(self, node):
348 '''return the tags associated with a node'''
348 '''return the tags associated with a node'''
349 if not self.nodetagscache:
349 if not self.nodetagscache:
350 self.nodetagscache = {}
350 self.nodetagscache = {}
351 for t, n in self.tags().iteritems():
351 for t, n in self.tags().iteritems():
352 self.nodetagscache.setdefault(n, []).append(t)
352 self.nodetagscache.setdefault(n, []).append(t)
353 return self.nodetagscache.get(node, [])
353 return self.nodetagscache.get(node, [])
354
354
355 def _branchtags(self, partial, lrev):
355 def _branchtags(self, partial, lrev):
356 # TODO: rename this function?
356 # TODO: rename this function?
357 tiprev = len(self) - 1
357 tiprev = len(self) - 1
358 if lrev != tiprev:
358 if lrev != tiprev:
359 self._updatebranchcache(partial, lrev+1, tiprev+1)
359 self._updatebranchcache(partial, lrev+1, tiprev+1)
360 self._writebranchcache(partial, self.changelog.tip(), tiprev)
360 self._writebranchcache(partial, self.changelog.tip(), tiprev)
361
361
362 return partial
362 return partial
363
363
364 def branchmap(self):
364 def branchmap(self):
365 tip = self.changelog.tip()
365 tip = self.changelog.tip()
366 if self.branchcache is not None and self._branchcachetip == tip:
366 if self.branchcache is not None and self._branchcachetip == tip:
367 return self.branchcache
367 return self.branchcache
368
368
369 oldtip = self._branchcachetip
369 oldtip = self._branchcachetip
370 self._branchcachetip = tip
370 self._branchcachetip = tip
371 if self.branchcache is None:
371 if self.branchcache is None:
372 self.branchcache = {} # avoid recursion in changectx
372 self.branchcache = {} # avoid recursion in changectx
373 else:
373 else:
374 self.branchcache.clear() # keep using the same dict
374 self.branchcache.clear() # keep using the same dict
375 if oldtip is None or oldtip not in self.changelog.nodemap:
375 if oldtip is None or oldtip not in self.changelog.nodemap:
376 partial, last, lrev = self._readbranchcache()
376 partial, last, lrev = self._readbranchcache()
377 else:
377 else:
378 lrev = self.changelog.rev(oldtip)
378 lrev = self.changelog.rev(oldtip)
379 partial = self._ubranchcache
379 partial = self._ubranchcache
380
380
381 self._branchtags(partial, lrev)
381 self._branchtags(partial, lrev)
382 # this private cache holds all heads (not just tips)
382 # this private cache holds all heads (not just tips)
383 self._ubranchcache = partial
383 self._ubranchcache = partial
384
384
385 # the branch cache is stored on disk as UTF-8, but in the local
385 # the branch cache is stored on disk as UTF-8, but in the local
386 # charset internally
386 # charset internally
387 for k, v in partial.iteritems():
387 for k, v in partial.iteritems():
388 self.branchcache[encoding.tolocal(k)] = v
388 self.branchcache[encoding.tolocal(k)] = v
389 return self.branchcache
389 return self.branchcache
390
390
391
391
392 def branchtags(self):
392 def branchtags(self):
393 '''return a dict where branch names map to the tipmost head of
393 '''return a dict where branch names map to the tipmost head of
394 the branch, open heads come before closed'''
394 the branch, open heads come before closed'''
395 bt = {}
395 bt = {}
396 for bn, heads in self.branchmap().iteritems():
396 for bn, heads in self.branchmap().iteritems():
397 head = None
397 head = None
398 for i in range(len(heads)-1, -1, -1):
398 for i in range(len(heads)-1, -1, -1):
399 h = heads[i]
399 h = heads[i]
400 if 'close' not in self.changelog.read(h)[5]:
400 if 'close' not in self.changelog.read(h)[5]:
401 head = h
401 head = h
402 break
402 break
403 # no open heads were found
403 # no open heads were found
404 if head is None:
404 if head is None:
405 head = heads[-1]
405 head = heads[-1]
406 bt[bn] = head
406 bt[bn] = head
407 return bt
407 return bt
408
408
409
409
410 def _readbranchcache(self):
410 def _readbranchcache(self):
411 partial = {}
411 partial = {}
412 try:
412 try:
413 f = self.opener("branchheads.cache")
413 f = self.opener("branchheads.cache")
414 lines = f.read().split('\n')
414 lines = f.read().split('\n')
415 f.close()
415 f.close()
416 except (IOError, OSError):
416 except (IOError, OSError):
417 return {}, nullid, nullrev
417 return {}, nullid, nullrev
418
418
419 try:
419 try:
420 last, lrev = lines.pop(0).split(" ", 1)
420 last, lrev = lines.pop(0).split(" ", 1)
421 last, lrev = bin(last), int(lrev)
421 last, lrev = bin(last), int(lrev)
422 if lrev >= len(self) or self[lrev].node() != last:
422 if lrev >= len(self) or self[lrev].node() != last:
423 # invalidate the cache
423 # invalidate the cache
424 raise ValueError('invalidating branch cache (tip differs)')
424 raise ValueError('invalidating branch cache (tip differs)')
425 for l in lines:
425 for l in lines:
426 if not l: continue
426 if not l: continue
427 node, label = l.split(" ", 1)
427 node, label = l.split(" ", 1)
428 partial.setdefault(label.strip(), []).append(bin(node))
428 partial.setdefault(label.strip(), []).append(bin(node))
429 except KeyboardInterrupt:
429 except KeyboardInterrupt:
430 raise
430 raise
431 except Exception, inst:
431 except Exception, inst:
432 if self.ui.debugflag:
432 if self.ui.debugflag:
433 self.ui.warn(str(inst), '\n')
433 self.ui.warn(str(inst), '\n')
434 partial, last, lrev = {}, nullid, nullrev
434 partial, last, lrev = {}, nullid, nullrev
435 return partial, last, lrev
435 return partial, last, lrev
436
436
437 def _writebranchcache(self, branches, tip, tiprev):
437 def _writebranchcache(self, branches, tip, tiprev):
438 try:
438 try:
439 f = self.opener("branchheads.cache", "w", atomictemp=True)
439 f = self.opener("branchheads.cache", "w", atomictemp=True)
440 f.write("%s %s\n" % (hex(tip), tiprev))
440 f.write("%s %s\n" % (hex(tip), tiprev))
441 for label, nodes in branches.iteritems():
441 for label, nodes in branches.iteritems():
442 for node in nodes:
442 for node in nodes:
443 f.write("%s %s\n" % (hex(node), label))
443 f.write("%s %s\n" % (hex(node), label))
444 f.rename()
444 f.rename()
445 except (IOError, OSError):
445 except (IOError, OSError):
446 pass
446 pass
447
447
448 def _updatebranchcache(self, partial, start, end):
448 def _updatebranchcache(self, partial, start, end):
449 for r in xrange(start, end):
449 for r in xrange(start, end):
450 c = self[r]
450 c = self[r]
451 b = c.branch()
451 b = c.branch()
452 bheads = partial.setdefault(b, [])
452 bheads = partial.setdefault(b, [])
453 bheads.append(c.node())
453 bheads.append(c.node())
454 for p in c.parents():
454 for p in c.parents():
455 pn = p.node()
455 pn = p.node()
456 if pn in bheads:
456 if pn in bheads:
457 bheads.remove(pn)
457 bheads.remove(pn)
458
458
459 def lookup(self, key):
459 def lookup(self, key):
460 if isinstance(key, int):
460 if isinstance(key, int):
461 return self.changelog.node(key)
461 return self.changelog.node(key)
462 elif key == '.':
462 elif key == '.':
463 return self.dirstate.parents()[0]
463 return self.dirstate.parents()[0]
464 elif key == 'null':
464 elif key == 'null':
465 return nullid
465 return nullid
466 elif key == 'tip':
466 elif key == 'tip':
467 return self.changelog.tip()
467 return self.changelog.tip()
468 n = self.changelog._match(key)
468 n = self.changelog._match(key)
469 if n:
469 if n:
470 return n
470 return n
471 if key in self.tags():
471 if key in self.tags():
472 return self.tags()[key]
472 return self.tags()[key]
473 if key in self.branchtags():
473 if key in self.branchtags():
474 return self.branchtags()[key]
474 return self.branchtags()[key]
475 n = self.changelog._partialmatch(key)
475 n = self.changelog._partialmatch(key)
476 if n:
476 if n:
477 return n
477 return n
478
478
479 # can't find key, check if it might have come from damaged dirstate
479 # can't find key, check if it might have come from damaged dirstate
480 if key in self.dirstate.parents():
480 if key in self.dirstate.parents():
481 raise error.Abort(_("working directory has unknown parent '%s'!")
481 raise error.Abort(_("working directory has unknown parent '%s'!")
482 % short(key))
482 % short(key))
483 try:
483 try:
484 if len(key) == 20:
484 if len(key) == 20:
485 key = hex(key)
485 key = hex(key)
486 except:
486 except:
487 pass
487 pass
488 raise error.RepoError(_("unknown revision '%s'") % key)
488 raise error.RepoError(_("unknown revision '%s'") % key)
489
489
490 def local(self):
490 def local(self):
491 return True
491 return True
492
492
493 def join(self, f):
493 def join(self, f):
494 return os.path.join(self.path, f)
494 return os.path.join(self.path, f)
495
495
496 def wjoin(self, f):
496 def wjoin(self, f):
497 return os.path.join(self.root, f)
497 return os.path.join(self.root, f)
498
498
499 def rjoin(self, f):
499 def rjoin(self, f):
500 return os.path.join(self.root, util.pconvert(f))
500 return os.path.join(self.root, util.pconvert(f))
501
501
502 def file(self, f):
502 def file(self, f):
503 if f[0] == '/':
503 if f[0] == '/':
504 f = f[1:]
504 f = f[1:]
505 return filelog.filelog(self.sopener, f)
505 return filelog.filelog(self.sopener, f)
506
506
507 def changectx(self, changeid):
507 def changectx(self, changeid):
508 return self[changeid]
508 return self[changeid]
509
509
510 def parents(self, changeid=None):
510 def parents(self, changeid=None):
511 '''get list of changectxs for parents of changeid'''
511 '''get list of changectxs for parents of changeid'''
512 return self[changeid].parents()
512 return self[changeid].parents()
513
513
514 def filectx(self, path, changeid=None, fileid=None):
514 def filectx(self, path, changeid=None, fileid=None):
515 """changeid can be a changeset revision, node, or tag.
515 """changeid can be a changeset revision, node, or tag.
516 fileid can be a file revision or node."""
516 fileid can be a file revision or node."""
517 return context.filectx(self, path, changeid, fileid)
517 return context.filectx(self, path, changeid, fileid)
518
518
519 def getcwd(self):
519 def getcwd(self):
520 return self.dirstate.getcwd()
520 return self.dirstate.getcwd()
521
521
522 def pathto(self, f, cwd=None):
522 def pathto(self, f, cwd=None):
523 return self.dirstate.pathto(f, cwd)
523 return self.dirstate.pathto(f, cwd)
524
524
525 def wfile(self, f, mode='r'):
525 def wfile(self, f, mode='r'):
526 return self.wopener(f, mode)
526 return self.wopener(f, mode)
527
527
528 def _link(self, f):
528 def _link(self, f):
529 return os.path.islink(self.wjoin(f))
529 return os.path.islink(self.wjoin(f))
530
530
531 def _filter(self, filter, filename, data):
531 def _filter(self, filter, filename, data):
532 if filter not in self.filterpats:
532 if filter not in self.filterpats:
533 l = []
533 l = []
534 for pat, cmd in self.ui.configitems(filter):
534 for pat, cmd in self.ui.configitems(filter):
535 if cmd == '!':
535 if cmd == '!':
536 continue
536 continue
537 mf = match_.match(self.root, '', [pat])
537 mf = match_.match(self.root, '', [pat])
538 fn = None
538 fn = None
539 params = cmd
539 params = cmd
540 for name, filterfn in self._datafilters.iteritems():
540 for name, filterfn in self._datafilters.iteritems():
541 if cmd.startswith(name):
541 if cmd.startswith(name):
542 fn = filterfn
542 fn = filterfn
543 params = cmd[len(name):].lstrip()
543 params = cmd[len(name):].lstrip()
544 break
544 break
545 if not fn:
545 if not fn:
546 fn = lambda s, c, **kwargs: util.filter(s, c)
546 fn = lambda s, c, **kwargs: util.filter(s, c)
547 # Wrap old filters not supporting keyword arguments
547 # Wrap old filters not supporting keyword arguments
548 if not inspect.getargspec(fn)[2]:
548 if not inspect.getargspec(fn)[2]:
549 oldfn = fn
549 oldfn = fn
550 fn = lambda s, c, **kwargs: oldfn(s, c)
550 fn = lambda s, c, **kwargs: oldfn(s, c)
551 l.append((mf, fn, params))
551 l.append((mf, fn, params))
552 self.filterpats[filter] = l
552 self.filterpats[filter] = l
553
553
554 for mf, fn, cmd in self.filterpats[filter]:
554 for mf, fn, cmd in self.filterpats[filter]:
555 if mf(filename):
555 if mf(filename):
556 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
556 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
557 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
557 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
558 break
558 break
559
559
560 return data
560 return data
561
561
562 def adddatafilter(self, name, filter):
562 def adddatafilter(self, name, filter):
563 self._datafilters[name] = filter
563 self._datafilters[name] = filter
564
564
565 def wread(self, filename):
565 def wread(self, filename):
566 if self._link(filename):
566 if self._link(filename):
567 data = os.readlink(self.wjoin(filename))
567 data = os.readlink(self.wjoin(filename))
568 else:
568 else:
569 data = self.wopener(filename, 'r').read()
569 data = self.wopener(filename, 'r').read()
570 return self._filter("encode", filename, data)
570 return self._filter("encode", filename, data)
571
571
572 def wwrite(self, filename, data, flags):
572 def wwrite(self, filename, data, flags):
573 data = self._filter("decode", filename, data)
573 data = self._filter("decode", filename, data)
574 try:
574 try:
575 os.unlink(self.wjoin(filename))
575 os.unlink(self.wjoin(filename))
576 except OSError:
576 except OSError:
577 pass
577 pass
578 if 'l' in flags:
578 if 'l' in flags:
579 self.wopener.symlink(data, filename)
579 self.wopener.symlink(data, filename)
580 else:
580 else:
581 self.wopener(filename, 'w').write(data)
581 self.wopener(filename, 'w').write(data)
582 if 'x' in flags:
582 if 'x' in flags:
583 util.set_flags(self.wjoin(filename), False, True)
583 util.set_flags(self.wjoin(filename), False, True)
584
584
585 def wwritedata(self, filename, data):
585 def wwritedata(self, filename, data):
586 return self._filter("decode", filename, data)
586 return self._filter("decode", filename, data)
587
587
588 def transaction(self):
588 def transaction(self):
589 tr = self._transref and self._transref() or None
589 tr = self._transref and self._transref() or None
590 if tr and tr.running():
590 if tr and tr.running():
591 return tr.nest()
591 return tr.nest()
592
592
593 # abort here if the journal already exists
593 # abort here if the journal already exists
594 if os.path.exists(self.sjoin("journal")):
594 if os.path.exists(self.sjoin("journal")):
595 raise error.RepoError(_("journal already exists - run hg recover"))
595 raise error.RepoError(_("journal already exists - run hg recover"))
596
596
597 # save dirstate for rollback
597 # save dirstate for rollback
598 try:
598 try:
599 ds = self.opener("dirstate").read()
599 ds = self.opener("dirstate").read()
600 except IOError:
600 except IOError:
601 ds = ""
601 ds = ""
602 self.opener("journal.dirstate", "w").write(ds)
602 self.opener("journal.dirstate", "w").write(ds)
603 self.opener("journal.branch", "w").write(self.dirstate.branch())
603 self.opener("journal.branch", "w").write(self.dirstate.branch())
604
604
605 renames = [(self.sjoin("journal"), self.sjoin("undo")),
605 renames = [(self.sjoin("journal"), self.sjoin("undo")),
606 (self.join("journal.dirstate"), self.join("undo.dirstate")),
606 (self.join("journal.dirstate"), self.join("undo.dirstate")),
607 (self.join("journal.branch"), self.join("undo.branch"))]
607 (self.join("journal.branch"), self.join("undo.branch"))]
608 tr = transaction.transaction(self.ui.warn, self.sopener,
608 tr = transaction.transaction(self.ui.warn, self.sopener,
609 self.sjoin("journal"),
609 self.sjoin("journal"),
610 aftertrans(renames),
610 aftertrans(renames),
611 self.store.createmode)
611 self.store.createmode)
612 self._transref = weakref.ref(tr)
612 self._transref = weakref.ref(tr)
613 return tr
613 return tr
614
614
615 def recover(self):
615 def recover(self):
616 lock = self.lock()
616 lock = self.lock()
617 try:
617 try:
618 if os.path.exists(self.sjoin("journal")):
618 if os.path.exists(self.sjoin("journal")):
619 self.ui.status(_("rolling back interrupted transaction\n"))
619 self.ui.status(_("rolling back interrupted transaction\n"))
620 transaction.rollback(self.sopener, self.sjoin("journal"), self.ui.warn)
620 transaction.rollback(self.sopener, self.sjoin("journal"), self.ui.warn)
621 self.invalidate()
621 self.invalidate()
622 return True
622 return True
623 else:
623 else:
624 self.ui.warn(_("no interrupted transaction available\n"))
624 self.ui.warn(_("no interrupted transaction available\n"))
625 return False
625 return False
626 finally:
626 finally:
627 lock.release()
627 lock.release()
628
628
629 def rollback(self):
629 def rollback(self):
630 wlock = lock = None
630 wlock = lock = None
631 try:
631 try:
632 wlock = self.wlock()
632 wlock = self.wlock()
633 lock = self.lock()
633 lock = self.lock()
634 if os.path.exists(self.sjoin("undo")):
634 if os.path.exists(self.sjoin("undo")):
635 self.ui.status(_("rolling back last transaction\n"))
635 self.ui.status(_("rolling back last transaction\n"))
636 transaction.rollback(self.sopener, self.sjoin("undo"), self.ui.warn)
636 transaction.rollback(self.sopener, self.sjoin("undo"), self.ui.warn)
637 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
637 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
638 try:
638 try:
639 branch = self.opener("undo.branch").read()
639 branch = self.opener("undo.branch").read()
640 self.dirstate.setbranch(branch)
640 self.dirstate.setbranch(branch)
641 except IOError:
641 except IOError:
642 self.ui.warn(_("Named branch could not be reset, "
642 self.ui.warn(_("Named branch could not be reset, "
643 "current branch still is: %s\n")
643 "current branch still is: %s\n")
644 % encoding.tolocal(self.dirstate.branch()))
644 % encoding.tolocal(self.dirstate.branch()))
645 self.invalidate()
645 self.invalidate()
646 self.dirstate.invalidate()
646 self.dirstate.invalidate()
647 else:
647 else:
648 self.ui.warn(_("no rollback information available\n"))
648 self.ui.warn(_("no rollback information available\n"))
649 finally:
649 finally:
650 release(lock, wlock)
650 release(lock, wlock)
651
651
652 def invalidate(self):
652 def invalidate(self):
653 for a in "changelog manifest".split():
653 for a in "changelog manifest".split():
654 if a in self.__dict__:
654 if a in self.__dict__:
655 delattr(self, a)
655 delattr(self, a)
656 self.tagscache = None
656 self.tagscache = None
657 self._tagstypecache = None
657 self._tagstypecache = None
658 self.nodetagscache = None
658 self.nodetagscache = None
659 self.branchcache = None
659 self.branchcache = None
660 self._ubranchcache = None
660 self._ubranchcache = None
661 self._branchcachetip = None
661 self._branchcachetip = None
662
662
663 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
663 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
664 try:
664 try:
665 l = lock.lock(lockname, 0, releasefn, desc=desc)
665 l = lock.lock(lockname, 0, releasefn, desc=desc)
666 except error.LockHeld, inst:
666 except error.LockHeld, inst:
667 if not wait:
667 if not wait:
668 raise
668 raise
669 self.ui.warn(_("waiting for lock on %s held by %r\n") %
669 self.ui.warn(_("waiting for lock on %s held by %r\n") %
670 (desc, inst.locker))
670 (desc, inst.locker))
671 # default to 600 seconds timeout
671 # default to 600 seconds timeout
672 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
672 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
673 releasefn, desc=desc)
673 releasefn, desc=desc)
674 if acquirefn:
674 if acquirefn:
675 acquirefn()
675 acquirefn()
676 return l
676 return l
677
677
678 def lock(self, wait=True):
678 def lock(self, wait=True):
679 l = self._lockref and self._lockref()
679 l = self._lockref and self._lockref()
680 if l is not None and l.held:
680 if l is not None and l.held:
681 l.lock()
681 l.lock()
682 return l
682 return l
683
683
684 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
684 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
685 _('repository %s') % self.origroot)
685 _('repository %s') % self.origroot)
686 self._lockref = weakref.ref(l)
686 self._lockref = weakref.ref(l)
687 return l
687 return l
688
688
689 def wlock(self, wait=True):
689 def wlock(self, wait=True):
690 l = self._wlockref and self._wlockref()
690 l = self._wlockref and self._wlockref()
691 if l is not None and l.held:
691 if l is not None and l.held:
692 l.lock()
692 l.lock()
693 return l
693 return l
694
694
695 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
695 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
696 self.dirstate.invalidate, _('working directory of %s') %
696 self.dirstate.invalidate, _('working directory of %s') %
697 self.origroot)
697 self.origroot)
698 self._wlockref = weakref.ref(l)
698 self._wlockref = weakref.ref(l)
699 return l
699 return l
700
700
701 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
701 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
702 """
702 """
703 commit an individual file as part of a larger transaction
703 commit an individual file as part of a larger transaction
704 """
704 """
705
705
706 fname = fctx.path()
706 fname = fctx.path()
707 text = fctx.data()
707 text = fctx.data()
708 flog = self.file(fname)
708 flog = self.file(fname)
709 fparent1 = manifest1.get(fname, nullid)
709 fparent1 = manifest1.get(fname, nullid)
710 fparent2 = fparent2o = manifest2.get(fname, nullid)
710 fparent2 = fparent2o = manifest2.get(fname, nullid)
711
711
712 meta = {}
712 meta = {}
713 copy = fctx.renamed()
713 copy = fctx.renamed()
714 if copy and copy[0] != fname:
714 if copy and copy[0] != fname:
715 # Mark the new revision of this file as a copy of another
715 # Mark the new revision of this file as a copy of another
716 # file. This copy data will effectively act as a parent
716 # file. This copy data will effectively act as a parent
717 # of this new revision. If this is a merge, the first
717 # of this new revision. If this is a merge, the first
718 # parent will be the nullid (meaning "look up the copy data")
718 # parent will be the nullid (meaning "look up the copy data")
719 # and the second one will be the other parent. For example:
719 # and the second one will be the other parent. For example:
720 #
720 #
721 # 0 --- 1 --- 3 rev1 changes file foo
721 # 0 --- 1 --- 3 rev1 changes file foo
722 # \ / rev2 renames foo to bar and changes it
722 # \ / rev2 renames foo to bar and changes it
723 # \- 2 -/ rev3 should have bar with all changes and
723 # \- 2 -/ rev3 should have bar with all changes and
724 # should record that bar descends from
724 # should record that bar descends from
725 # bar in rev2 and foo in rev1
725 # bar in rev2 and foo in rev1
726 #
726 #
727 # this allows this merge to succeed:
727 # this allows this merge to succeed:
728 #
728 #
729 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
729 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
730 # \ / merging rev3 and rev4 should use bar@rev2
730 # \ / merging rev3 and rev4 should use bar@rev2
731 # \- 2 --- 4 as the merge base
731 # \- 2 --- 4 as the merge base
732 #
732 #
733
733
734 cfname = copy[0]
734 cfname = copy[0]
735 crev = manifest1.get(cfname)
735 crev = manifest1.get(cfname)
736 newfparent = fparent2
736 newfparent = fparent2
737
737
738 if manifest2: # branch merge
738 if manifest2: # branch merge
739 if fparent2 == nullid or crev is None: # copied on remote side
739 if fparent2 == nullid or crev is None: # copied on remote side
740 if cfname in manifest2:
740 if cfname in manifest2:
741 crev = manifest2[cfname]
741 crev = manifest2[cfname]
742 newfparent = fparent1
742 newfparent = fparent1
743
743
744 # find source in nearest ancestor if we've lost track
744 # find source in nearest ancestor if we've lost track
745 if not crev:
745 if not crev:
746 self.ui.debug(_(" %s: searching for copy revision for %s\n") %
746 self.ui.debug(_(" %s: searching for copy revision for %s\n") %
747 (fname, cfname))
747 (fname, cfname))
748 for ancestor in self['.'].ancestors():
748 for ancestor in self['.'].ancestors():
749 if cfname in ancestor:
749 if cfname in ancestor:
750 crev = ancestor[cfname].filenode()
750 crev = ancestor[cfname].filenode()
751 break
751 break
752
752
753 self.ui.debug(_(" %s: copy %s:%s\n") % (fname, cfname, hex(crev)))
753 self.ui.debug(_(" %s: copy %s:%s\n") % (fname, cfname, hex(crev)))
754 meta["copy"] = cfname
754 meta["copy"] = cfname
755 meta["copyrev"] = hex(crev)
755 meta["copyrev"] = hex(crev)
756 fparent1, fparent2 = nullid, newfparent
756 fparent1, fparent2 = nullid, newfparent
757 elif fparent2 != nullid:
757 elif fparent2 != nullid:
758 # is one parent an ancestor of the other?
758 # is one parent an ancestor of the other?
759 fparentancestor = flog.ancestor(fparent1, fparent2)
759 fparentancestor = flog.ancestor(fparent1, fparent2)
760 if fparentancestor == fparent1:
760 if fparentancestor == fparent1:
761 fparent1, fparent2 = fparent2, nullid
761 fparent1, fparent2 = fparent2, nullid
762 elif fparentancestor == fparent2:
762 elif fparentancestor == fparent2:
763 fparent2 = nullid
763 fparent2 = nullid
764
764
765 # is the file changed?
765 # is the file changed?
766 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
766 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
767 changelist.append(fname)
767 changelist.append(fname)
768 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
768 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
769
769
770 # are just the flags changed during merge?
770 # are just the flags changed during merge?
771 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
771 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
772 changelist.append(fname)
772 changelist.append(fname)
773
773
774 return fparent1
774 return fparent1
775
775
776 def commit(self, text="", user=None, date=None, match=None, force=False,
776 def commit(self, text="", user=None, date=None, match=None, force=False,
777 editor=False, extra={}):
777 editor=False, extra={}):
778 """Add a new revision to current repository.
778 """Add a new revision to current repository.
779
779
780 Revision information is gathered from the working directory,
780 Revision information is gathered from the working directory,
781 match can be used to filter the committed files. If editor is
781 match can be used to filter the committed files. If editor is
782 supplied, it is called to get a commit message.
782 supplied, it is called to get a commit message.
783 """
783 """
784
784
785 wlock = self.wlock()
785 wlock = self.wlock()
786 try:
786 try:
787 p1, p2 = self.dirstate.parents()
787 p1, p2 = self.dirstate.parents()
788
788
789 if (not force and p2 != nullid and match and
789 if (not force and p2 != nullid and match and
790 (match.files() or match.anypats())):
790 (match.files() or match.anypats())):
791 raise util.Abort(_('cannot partially commit a merge '
791 raise util.Abort(_('cannot partially commit a merge '
792 '(do not specify files or patterns)'))
792 '(do not specify files or patterns)'))
793
793
794 def fail(f, msg):
794 def fail(f, msg):
795 raise util.Abort('%s: %s' % (f, msg))
795 raise util.Abort('%s: %s' % (f, msg))
796
796
797 if not match:
797 if not match:
798 match = match_.always(self.root, '')
798 match = match_.always(self.root, '')
799
799
800 if not force:
800 if not force:
801 vdirs = []
801 vdirs = []
802 match.dir = vdirs.append
802 match.dir = vdirs.append
803 match.bad = fail
803 match.bad = fail
804
804
805 changes = self.status(match=match, clean=force)
805 changes = self.status(match=match, clean=force)
806 if force:
806 if force:
807 changes[0].extend(changes[6]) # mq may commit unchanged files
807 changes[0].extend(changes[6]) # mq may commit unchanged files
808
808
809 # make sure all explicit patterns are matched
809 # make sure all explicit patterns are matched
810 if not force and match.files():
810 if not force and match.files():
811 matched = set(changes[0] + changes[1] + changes[2])
811 matched = set(changes[0] + changes[1] + changes[2])
812
812
813 for f in match.files():
813 for f in match.files():
814 if f == '.' or f in matched: # matched
814 if f == '.' or f in matched: # matched
815 continue
815 continue
816 if f in changes[3]: # missing
816 if f in changes[3]: # missing
817 fail(f, _('file not found!'))
817 fail(f, _('file not found!'))
818 if f in vdirs: # visited directory
818 if f in vdirs: # visited directory
819 d = f + '/'
819 d = f + '/'
820 for mf in matched:
820 for mf in matched:
821 if mf.startswith(d):
821 if mf.startswith(d):
822 break
822 break
823 else:
823 else:
824 fail(f, _("no match under directory!"))
824 fail(f, _("no match under directory!"))
825 elif f not in self.dirstate:
825 elif f not in self.dirstate:
826 fail(f, _("file not tracked!"))
826 fail(f, _("file not tracked!"))
827
827
828 if (not force and not extra.get("close") and p2 == nullid
828 if (not force and not extra.get("close") and p2 == nullid
829 and not (changes[0] or changes[1] or changes[2])
829 and not (changes[0] or changes[1] or changes[2])
830 and self[None].branch() == self['.'].branch()):
830 and self[None].branch() == self['.'].branch()):
831 self.ui.status(_("nothing changed\n"))
831 self.ui.status(_("nothing changed\n"))
832 return None
832 return None
833
833
834 ms = merge_.mergestate(self)
834 ms = merge_.mergestate(self)
835 for f in changes[0]:
835 for f in changes[0]:
836 if f in ms and ms[f] == 'u':
836 if f in ms and ms[f] == 'u':
837 raise util.Abort(_("unresolved merge conflicts "
837 raise util.Abort(_("unresolved merge conflicts "
838 "(see hg resolve)"))
838 "(see hg resolve)"))
839
839
840 wctx = context.workingctx(self, (p1, p2), text, user, date,
840 cctx = context.workingctx(self, (p1, p2), text, user, date,
841 extra, changes)
841 extra, changes)
842 if editor:
842 if editor:
843 wctx._text = editor(self, wctx)
843 cctx._text = editor(self, cctx)
844 ret = self.commitctx(wctx, True)
844 ret = self.commitctx(cctx, True)
845
845
846 # update dirstate and mergestate
846 # update dirstate and mergestate
847 for f in changes[0] + changes[1]:
847 for f in changes[0] + changes[1]:
848 self.dirstate.normal(f)
848 self.dirstate.normal(f)
849 for f in changes[2]:
849 for f in changes[2]:
850 self.dirstate.forget(f)
850 self.dirstate.forget(f)
851 self.dirstate.setparents(ret)
851 self.dirstate.setparents(ret)
852 ms.reset()
852 ms.reset()
853
853
854 return ret
854 return ret
855
855
856 finally:
856 finally:
857 wlock.release()
857 wlock.release()
858
858
859 def commitctx(self, ctx, error=False):
859 def commitctx(self, ctx, error=False):
860 """Add a new revision to current repository.
860 """Add a new revision to current repository.
861
861
862 Revision information is passed via the context argument.
862 Revision information is passed via the context argument.
863 """
863 """
864
864
865 tr = lock = None
865 tr = lock = None
866 removed = ctx.removed()
866 removed = ctx.removed()
867 p1, p2 = ctx.p1(), ctx.p2()
867 p1, p2 = ctx.p1(), ctx.p2()
868 m1 = p1.manifest().copy()
868 m1 = p1.manifest().copy()
869 m2 = p2.manifest()
869 m2 = p2.manifest()
870 user = ctx.user()
870 user = ctx.user()
871
871
872 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
872 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
873 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
873 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
874
874
875 lock = self.lock()
875 lock = self.lock()
876 try:
876 try:
877 tr = self.transaction()
877 tr = self.transaction()
878 trp = weakref.proxy(tr)
878 trp = weakref.proxy(tr)
879
879
880 # check in files
880 # check in files
881 new = {}
881 new = {}
882 changed = []
882 changed = []
883 linkrev = len(self)
883 linkrev = len(self)
884 for f in sorted(ctx.modified() + ctx.added()):
884 for f in sorted(ctx.modified() + ctx.added()):
885 self.ui.note(f + "\n")
885 self.ui.note(f + "\n")
886 try:
886 try:
887 fctx = ctx[f]
887 fctx = ctx[f]
888 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
888 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
889 changed)
889 changed)
890 m1.set(f, fctx.flags())
890 m1.set(f, fctx.flags())
891 except (OSError, IOError):
891 except (OSError, IOError):
892 if error:
892 if error:
893 self.ui.warn(_("trouble committing %s!\n") % f)
893 self.ui.warn(_("trouble committing %s!\n") % f)
894 raise
894 raise
895 else:
895 else:
896 removed.append(f)
896 removed.append(f)
897
897
898 # update manifest
898 # update manifest
899 m1.update(new)
899 m1.update(new)
900 removed = [f for f in sorted(removed) if f in m1 or f in m2]
900 removed = [f for f in sorted(removed) if f in m1 or f in m2]
901 drop = [f for f in removed if f in m1]
901 drop = [f for f in removed if f in m1]
902 for f in drop:
902 for f in drop:
903 del m1[f]
903 del m1[f]
904 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
904 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
905 p2.manifestnode(), (new, drop))
905 p2.manifestnode(), (new, drop))
906
906
907 # update changelog
907 # update changelog
908 self.changelog.delayupdate()
908 self.changelog.delayupdate()
909 n = self.changelog.add(mn, changed + removed, ctx.description(),
909 n = self.changelog.add(mn, changed + removed, ctx.description(),
910 trp, p1.node(), p2.node(),
910 trp, p1.node(), p2.node(),
911 user, ctx.date(), ctx.extra().copy())
911 user, ctx.date(), ctx.extra().copy())
912 p = lambda: self.changelog.writepending() and self.root or ""
912 p = lambda: self.changelog.writepending() and self.root or ""
913 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
913 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
914 parent2=xp2, pending=p)
914 parent2=xp2, pending=p)
915 self.changelog.finalize(trp)
915 self.changelog.finalize(trp)
916 tr.close()
916 tr.close()
917
917
918 if self.branchcache:
918 if self.branchcache:
919 self.branchtags()
919 self.branchtags()
920
920
921 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
921 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
922 return n
922 return n
923 finally:
923 finally:
924 del tr
924 del tr
925 lock.release()
925 lock.release()
926
926
927 def walk(self, match, node=None):
927 def walk(self, match, node=None):
928 '''
928 '''
929 walk recursively through the directory tree or a given
929 walk recursively through the directory tree or a given
930 changeset, finding all files matched by the match
930 changeset, finding all files matched by the match
931 function
931 function
932 '''
932 '''
933 return self[node].walk(match)
933 return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or match_.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                if f not in ctx1:
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            s = self.dirstate.status(match, listignored, listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f].data())):
                        modified.append(f)
                    else:
                        fixup.append(f)

                if listclean:
                    clean += fixup

                # update dirstate for files that are actually clean
                if fixup:
                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    if (mf1.flags(fn) != mf2.flags(fn) or
                        (mf1[fn] != mf2[fn] and
                         (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)
            removed = mf1.keys()

        r = modified, added, removed, deleted, unknown, ignored, clean
        [l.sort() for l in r]
        return r
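
    # Illustrative usage (added commentary, not part of the original file):
    # status() returns seven sorted lists, conventionally unpacked as
    #
    #   modified, added, removed, deleted, unknown, ignored, clean = \
    #       repo.status(node1='.', node2=None, clean=True)
    #
    # where 'repo' is assumed to be a localrepository instance.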

    def add(self, list):
        wlock = self.wlock()
        try:
            rejected = []
            for f in list:
                p = self.wjoin(f)
                try:
                    st = os.lstat(p)
                except OSError:
                    # lstat failed, so the path is missing or unreadable
                    self.ui.warn(_("%s does not exist!\n") % f)
                    rejected.append(f)
                    continue
                if st.st_size > 10000000:
                    self.ui.warn(_("%s: files over 10MB may cause memory and"
                                   " performance problems\n"
                                   "(use 'hg revert %s' to unadd the file)\n")
                                 % (f, f))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    self.ui.warn(_("%s not added: only files and symlinks "
                                   "supported currently\n") % f)
                    rejected.append(p)
                elif self.dirstate[f] in 'amn':
                    self.ui.warn(_("%s already tracked!\n") % f)
                elif self.dirstate[f] == 'r':
                    self.dirstate.normallookup(f)
                else:
                    self.dirstate.add(f)
            return rejected
        finally:
            wlock.release()

    def forget(self, list):
        wlock = self.wlock()
        try:
            for f in list:
                if self.dirstate[f] != 'a':
                    self.ui.warn(_("%s not added!\n") % f)
                else:
                    self.dirstate.forget(f)
        finally:
            wlock.release()

    def remove(self, list, unlink=False):
        if unlink:
            for f in list:
                try:
                    util.unlink(self.wjoin(f))
                except OSError, inst:
                    if inst.errno != errno.ENOENT:
                        raise
        wlock = self.wlock()
        try:
            for f in list:
                if unlink and os.path.exists(self.wjoin(f)):
                    self.ui.warn(_("%s still exists!\n") % f)
                elif self.dirstate[f] == 'a':
                    self.dirstate.forget(f)
                elif f not in self.dirstate:
                    self.ui.warn(_("%s not tracked!\n") % f)
                else:
                    self.dirstate.remove(f)
        finally:
            wlock.release()

    def undelete(self, list):
        manifests = [self.manifest.read(self.changelog.read(p)[0])
                     for p in self.dirstate.parents() if p != nullid]
        wlock = self.wlock()
        try:
            for f in list:
                if self.dirstate[f] != 'r':
                    self.ui.warn(_("%s not removed!\n") % f)
                else:
                    m = f in manifests[0] and manifests[0] or manifests[1]
                    t = self.file(f).read(m[f])
                    self.wwrite(f, t, m.flags(f))
                    self.dirstate.normal(f)
        finally:
            wlock.release()

    def copy(self, source, dest):
        p = self.wjoin(dest)
        if not (os.path.exists(p) or os.path.islink(p)):
            self.ui.warn(_("%s does not exist!\n") % dest)
        elif not (os.path.isfile(p) or os.path.islink(p)):
            self.ui.warn(_("copy failed: %s is not a file or a "
                           "symbolic link\n") % dest)
        else:
            wlock = self.wlock()
            try:
                if self.dirstate[dest] in '?r':
                    self.dirstate.add(dest)
                self.dirstate.copy(source, dest)
            finally:
                wlock.release()

    def heads(self, start=None, closed=False):
        heads = self.changelog.heads(start)
        def display(head):
            if closed:
                return True
            extras = self.changelog.read(head)[5]
            return ('close' not in extras)
        # sort the output in rev descending order
        heads = [(-self.changelog.rev(h), h) for h in heads if display(h)]
        return [n for (r, n) in sorted(heads)]
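
    # Added note (not in the original file): negating the revision number in
    # the sort key above is what yields the descending rev order, since
    # sorted() itself always sorts ascending.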

    def branchheads(self, branch=None, start=None, closed=False):
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        bheads = branches[branch]
        # the cache returns heads ordered lowest to highest
        bheads.reverse()
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            bheads = self.changelog.nodesbetween([start], bheads)[2]
        if not closed:
            bheads = [h for h in bheads if
                      ('close' not in self.changelog.read(h)[5])]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while 1:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r
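
    # Added note (not in the original file): for each (top, bottom) pair the
    # loop above walks first parents from top toward bottom and records the
    # node i steps away each time i reaches f = 1, 2, 4, 8, ... These
    # exponentially spaced samples are what let findcommonincoming's
    # remote.between() phase narrow an unknown linear range of history in
    # O(log n) rounds.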

    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore, base will be updated to include the nodes that exist in
        both self and remote but none of whose children exist in both.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote,
        see outgoing)
        """
        return self.findcommonincoming(remote, base, heads, force)[1]

    def findcommonincoming(self, remote, base=None, heads=None, force=False):
        """Return a tuple (common, missing roots, heads) used to identify
        missing nodes from remote.

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore, base will be updated to include the nodes that exist in
        both self and remote but none of whose children exist in both.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        """
        m = self.changelog.nodemap
        search = []
        fetch = set()
        seen = set()
        seenbranch = set()
        if base is None:
            base = {}

        if not heads:
            heads = remote.heads()

        if self.changelog.tip() == nullid:
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid], [nullid], list(heads)
            return [nullid], [], []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        heads = unknown
        if not unknown:
            return base.keys(), [], []

        req = set(unknown)
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n[0:2]) # schedule branch range for scanning
                    seenbranch.add(n)
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch.add(n[1]) # earliest unknown
                        for p in n[2:4]:
                            if p in m:
                                base[p] = 1 # latest known

                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req.add(p)
                seen.add(n[0])

            if r:
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                              (reqcnt, " ".join(map(short, r))))
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        while search:
            newsearch = []
            reqcnt += 1
            for n, l in zip(search, remote.between(search)):
                l.append(n[1])
                p = n[0]
                f = 1
                for i in l:
                    self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                    if i in m:
                        if f <= 2:
                            self.ui.debug(_("found new branch changeset %s\n") %
                                          short(p))
                            fetch.add(p)
                            base[i] = 1
                        else:
                            self.ui.debug(_("narrowed branch search to %s:%s\n")
                                          % (short(p), short(i)))
                            newsearch.append((p, i))
                        break
                    p, f = i, f * 2
            search = newsearch

        # sanity check our fetch list
        for f in fetch:
            if f in m:
                raise error.RepoError(_("already have changeset ")
                                      + short(f[:4]))

        if base.keys() == [nullid]:
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug(_("found new changesets starting at ") +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return base.keys(), list(fetch), heads
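
    # Illustrative usage (added commentary, not part of the original file):
    # pull() below unpacks the result as
    #
    #   common, fetch, rheads = self.findcommonincoming(remote, heads=heads)
    #
    # with common the nodes known on both sides, fetch the roots of the
    # missing subsets, and rheads the remote heads.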

    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return list of nodes that are roots of subsets not in remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads, and return a second element which
        contains all remote heads which get new children.
        """
        if base is None:
            base = {}
            self.findincoming(remote, base, heads, force=force)

        self.ui.debug(_("common changesets up to ")
                      + " ".join(map(short, base.keys())) + "\n")

        remain = set(self.changelog.nodemap)

        # prune everything remote has from the tree
        remain.remove(nullid)
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                remain.remove(n)
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        # find every remote head that will get new children
        updated_heads = set()
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)
            if heads:
                if p1 in heads:
                    updated_heads.add(p1)
                if p2 in heads:
                    updated_heads.add(p2)

        # this is the set of all roots we have to push
        if heads:
            return subset, list(updated_heads)
        else:
            return subset
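
    # Added note (not in the original file): the return shape depends on the
    # heads argument - (subset, updated_heads) when heads is given, a plain
    # subset list otherwise. prepush() below relies on the two-element form
    # by always passing remote_heads.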

    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            common, fetch, rheads = self.findcommonincoming(remote, heads=heads,
                                                            force=force)
            if fetch == [nullid]:
                self.ui.status(_("requesting all changes\n"))

            if not fetch:
                self.ui.status(_("no changes found\n"))
                return 0

            if heads is None and remote.capable('changegroupsubset'):
                heads = rheads

            if heads is None:
                cg = remote.changegroup(fetch, 'pull')
            else:
                if not remote.capable('changegroupsubset'):
                    raise util.Abort(_("Partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            return self.addchangegroup(cg, 'pull', remote.url())
        finally:
            lock.release()

    def push(self, remote, force=False, revs=None):
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        if remote.capable('unbundle'):
            return self.push_unbundle(remote, force, revs)
        return self.push_addchangegroup(remote, force, revs)

    def prepush(self, remote, force, revs):
        common = {}
        remote_heads = remote.heads()
        inc = self.findincoming(remote, common, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, common, remote_heads)
        if revs is not None:
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        def checkbranch(lheads, rheads, updatelh):
            '''
            check whether there are more local heads than remote heads on
            a specific branch.

            lheads: local branch heads
            rheads: remote branch heads
            updatelh: outgoing local branch heads
            '''

            warn = 0

            if not revs and len(lheads) > len(rheads):
                warn = 1
            else:
                updatelheads = [self.changelog.heads(x, lheads)
                                for x in updatelh]
                newheads = set(sum(updatelheads, [])) & set(lheads)

                if not newheads:
                    return True

                for r in rheads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            newheads.add(r)
                    else:
                        newheads.add(r)
                if len(newheads) > len(rheads):
                    warn = 1

            if warn:
                if not rheads: # new branch requires --force
                    self.ui.warn(_("abort: push creates new"
                                   " remote branch '%s'!\n") %
                                 self[updatelh[0]].branch())
                else:
                    self.ui.warn(_("abort: push creates new remote heads!\n"))

                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return False
            return True

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # Check for each named branch if we're creating new remote heads.
            # To be a remote head after push, node must be either:
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head
            #
            # New named branches cannot be created without --force.

            if remote_heads != [nullid]:
                if remote.capable('branchmap'):
                    localhds = {}
                    if not revs:
                        localhds = self.branchmap()
                    else:
                        for n in heads:
                            branch = self[n].branch()
                            if branch in localhds:
                                localhds[branch].append(n)
                            else:
                                localhds[branch] = [n]

                    remotehds = remote.branchmap()

                    for lh in localhds:
                        if lh in remotehds:
                            rheads = remotehds[lh]
                        else:
                            rheads = []
                        lheads = localhds[lh]
                        updatelh = [upd for upd in update
                                    if self[upd].branch() == lh]
                        if not updatelh:
                            continue
                        if not checkbranch(lheads, rheads, updatelh):
                            return None, 0
                else:
                    if not checkbranch(heads, remote_heads, update):
                        return None, 0

        if inc:
            self.ui.warn(_("note: unsynced remote changes!\n"))

        if revs is None:
            # use the fast path, no race possible on push
            cg = self._changegroup(common.keys(), 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads
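
    # Added note (not in the original file): prepush returns either
    # (changegroup, remote_heads) when there is something to send, or
    # (None, status) where status 1 means "no changes found" and 0 means the
    # push was refused; push_addchangegroup and push_unbundle below depend on
    # exactly this contract via their "ret[0] is not None" checks.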

    def push_addchangegroup(self, remote, force, revs):
        lock = remote.lock()
        try:
            ret = self.prepush(remote, force, revs)
            if ret[0] is not None:
                cg, remote_heads = ret
                return remote.addchangegroup(cg, 'push', self.url())
            return ret[1]
        finally:
            lock.release()

    def push_unbundle(self, remote, force, revs):
        # local repo finds heads on server, finds out what revs it
        # must push. once revs transferred, if server finds it has
        # different heads (someone else won commit/push race), server
        # aborts.

        ret = self.prepush(remote, force, revs)
        if ret[0] is not None:
            cg, remote_heads = ret
            if force: remote_heads = ['force']
            return remote.unbundle(cg, remote_heads, 'push')
        return ret[1]

    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug(_("list of changesets:\n"))
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source, extranodes=None):
        """This function generates a changegroup consisting of all the nodes
        that are descendants of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        The caller can specify some nodes that must be included in the
        changegroup using the extranodes argument. It should be a dict
        where the keys are the filenames (or 1 for the manifest), and the
        values are lists of (node, linknode) tuples, where node is a wanted
        node and linknode is the changelog node that should be transmitted as
        the linkrev.
        """
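        # Illustrative extranodes value (added commentary; the node values
        # are placeholders, not real identifiers):
        #
        #   extranodes = {
        #       'some/file.txt': [(filenode, linknode)],
        #       1: [(manifestnode, linknode)],  # key 1 denotes the manifest
        #   }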
        if extranodes is None:
            # can we go through the fast path?
            heads.sort()
            allheads = self.heads()
            allheads.sort()
            if heads == allheads:
                common = []
                # parents of bases are known from both sides
                for n in bases:
                    for p in self.changelog.parents(n):
                        if p != nullid:
                            common.append(p)
                return self._changegroup(common, source)

        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        self.changegroupinfo(msng_cl_lst, source)
        # Some bases may turn out to be superfluous, and some heads may be
        # too. nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = set()
        # We assume that all parents of bases are known heads.
        for n in bases:
            knownheads.update(cl.parents(n))
        knownheads.discard(nullid)
        knownheads = list(knownheads)
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known. The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into a set.
            has_cl_set = set(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = set()

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function. Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history. Since the revision number both represents the
            # most efficient order to read the nodes in, and represents a
            # topological sorting of the nodes, this function is often useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents. This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = list(hasset)
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset.add(n)
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)
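
        # Added note (not in the original file): cmp_by_rev_func is applied
        # below as, e.g., msng_mnfst_lst.sort(cmp_by_rev_func(mnfst)), so
        # nodes are always emitted in revlog (and thus topological) order.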

        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup. The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and a total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode each manifest was
            # referenced by, so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = set()
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
                if linknode in has_cl_set:
                    has_mnfst_set.add(n)
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # for the inner function.
        def filenode_collector(changedfiles):
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to. It
            # does this by assuming a filenode belongs to the changenode that
            # the first manifest referencing it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    deltamf = mnfst.readdelta(mnfstnode)
                    # For each line in the delta
                    for f, fnode in deltamf.iteritems():
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file, let's
        # remove all those we know the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = set()
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
                if clnode in has_cl_set:
                    hasset.add(n)
            prune_parents(filerevlog, hasset, msngset)

        # A function generating function that sets up a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Add the nodes that were explicitly requested.
        def add_extra_nodes(name, nodes):
            if not extranodes or name not in extranodes:
                return

            for node, linknode in extranodes[name]:
                if node not in nodes:
                    nodes[node] = linknode
1822
1822
1823 # Now that we have all theses utility functions to help out and
1823 # Now that we have all theses utility functions to help out and
1824 # logically divide up the task, generate the group.
1824 # logically divide up the task, generate the group.
1825 def gengroup():
1825 def gengroup():
1826 # The set of changed files starts empty.
1826 # The set of changed files starts empty.
1827 changedfiles = {}
1827 changedfiles = {}
1828 # Create a changenode group generator that will call our functions
1828 # Create a changenode group generator that will call our functions
1829 # back to lookup the owning changenode and collect information.
1829 # back to lookup the owning changenode and collect information.
1830 group = cl.group(msng_cl_lst, identity,
1830 group = cl.group(msng_cl_lst, identity,
1831 manifest_and_file_collector(changedfiles))
1831 manifest_and_file_collector(changedfiles))
1832 for chnk in group:
1832 for chnk in group:
1833 yield chnk
1833 yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            add_extra_nodes(1, msng_mnfst_set)
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            if extranodes:
                for fname in extranodes:
                    if isinstance(fname, int):
                        continue
                    msng_filenode_set.setdefault(fname, {})
                    changedfiles[fname] = 1
            # Go through all our files in order sorted by name.
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if fname in msng_filenode_set:
                    prune_filenodes(fname, filerevlog)
                    add_extra_nodes(fname, msng_filenode_set[fname])
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    # Sort the filenodes by their revision number.
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if fname in msng_filenode_set:
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

        if msng_cl_lst:
            self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())

    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
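        # basenodes is the set of nodes the recipient is assumed to have;
        # everything reachable from our heads but not from basenodes is sent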
        return self.changegroupsubset(basenodes, self.heads(), source)

    def _changegroup(self, common, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        common is the set of common nodes between remote and self"""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        nodes = cl.findmissing(common)
        revset = set([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes, source)

        def identity(x):
            return x
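        # For the changelog, every node is its own link node, hence the
        # identity lookup passed to cl.group() below.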

        def gennodelst(log):
            for r in log:
                if log.linkrev(r) in revset:
                    yield log.node(r)

        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                changedfileset.update(c[3])
            return collect_changed_files

        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(revlog.rev(n)))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = set()

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())
1969
1969
1970 def addchangegroup(self, source, srctype, url, emptyok=False):
1970 def addchangegroup(self, source, srctype, url, emptyok=False):
1971 """add changegroup to repo.
1971 """add changegroup to repo.
1972
1972
1973 return values:
1973 return values:
1974 - nothing changed or no source: 0
1974 - nothing changed or no source: 0
1975 - more heads than before: 1+added heads (2..n)
1975 - more heads than before: 1+added heads (2..n)
1976 - less heads than before: -1-removed heads (-2..-n)
1976 - less heads than before: -1-removed heads (-2..-n)
1977 - number of heads stays the same: 1
1977 - number of heads stays the same: 1
1978 """
1978 """
        def csmap(x):
            self.ui.debug(_("add changeset %s\n") % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        # write changelog data to temp files so concurrent readers will not
        # see an inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = len(cl.heads())

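        # all additions run inside a single transaction so a failure
        # mid-stream can be rolled back cleanly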
        tr = self.transaction()
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, trp)

            # process the files
            self.ui.status(_("adding file changes\n"))
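            # each file group is preceded by a chunk carrying the file name;
            # an empty chunk ends the stream (mirroring gengroup above)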
            while True:
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = len(fl)
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1

            newheads = len(cl.heads())
            heads = ""
            if oldheads and newheads != oldheads:
                heads = _(" (%+d heads)") % (newheads - oldheads)

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, heads))

            if changesets > 0:
                p = lambda: cl.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(cl.node(clstart)), source=srctype,
                          url=url, pending=p)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()
        finally:
            del tr

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug(_("updating the branch cache\n"))
            self.branchtags()
            self.hook("changegroup", node=hex(cl.node(clstart)),
                      source=srctype, url=url)

            for i in xrange(clstart, clend):
                self.hook("incoming", node=hex(cl.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1


    def stream_in(self, remote):
        fp = remote.stream_out()
        l = fp.readline()
        try:
            resp = int(l)
        except ValueError:
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        if resp == 1:
            raise util.Abort(_('operation forbidden by server'))
        elif resp == 2:
            raise util.Abort(_('locking the remote repository failed'))
        elif resp != 0:
            raise util.Abort(_('the server sent an unknown error code'))
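        # after the status code the server sends 'total_files total_bytes\n',
        # then for each file a 'name\0size\n' header followed by size bytes
        # of raw store data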
        self.ui.status(_('streaming all changes\n'))
        l = fp.readline()
        try:
            total_files, total_bytes = map(int, l.split(' ', 1))
        except (ValueError, TypeError):
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        self.ui.status(_('%d files to transfer, %s of data\n') %
                       (total_files, util.bytecount(total_bytes)))
        start = time.time()
        for i in xrange(total_files):
            # XXX doesn't support '\n' or '\r' in filenames
            l = fp.readline()
            try:
                name, size = l.split('\0', 1)
                size = int(size)
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            self.ui.debug(_('adding %s (%s)\n') % (name, util.bytecount(size)))
            # for backwards compat, name was partially encoded
            ofp = self.sopener(store.decodedir(name), 'w')
            for chunk in util.filechunkiter(fp, limit=size):
                ofp.write(chunk)
            ofp.close()
        elapsed = time.time() - start
        if elapsed <= 0:
            elapsed = 0.001
        self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(total_bytes), elapsed,
                        util.bytecount(total_bytes / elapsed)))
        self.invalidate()
        return len(self.heads()) + 1
2122
2122
2123 def clone(self, remote, heads=[], stream=False):
2123 def clone(self, remote, heads=[], stream=False):
2124 '''clone remote repository.
2124 '''clone remote repository.
2125
2125
2126 keyword arguments:
2126 keyword arguments:
2127 heads: list of revs to clone (forces use of pull)
2127 heads: list of revs to clone (forces use of pull)
2128 stream: use streaming clone if possible'''
2128 stream: use streaming clone if possible'''
2129
2129
2130 # now, all clients that can request uncompressed clones can
2130 # now, all clients that can request uncompressed clones can
2131 # read repo formats supported by all servers that can serve
2131 # read repo formats supported by all servers that can serve
2132 # them.
2132 # them.
2133
2133
2134 # if revlog format changes, client will have to check version
2134 # if revlog format changes, client will have to check version
2135 # and format flags on "stream" capability, and use
2135 # and format flags on "stream" capability, and use
2136 # uncompressed only if compatible.
2136 # uncompressed only if compatible.
2137
2137
2138 if stream and not heads and remote.capable('stream'):
2138 if stream and not heads and remote.capable('stream'):
2139 return self.stream_in(remote)
2139 return self.stream_in(remote)
2140 return self.pull(remote, heads)
2140 return self.pull(remote, heads)

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a
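# (the transaction code passes rename pairs such as (journal, undo) here and
# calls the returned function once the transaction is safely closed)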

def instance(ui, path, create):
    return localrepository(ui, util.drop_scheme('file', path), create)

def islocal(path):
    return True
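
# A minimal usage sketch (hypothetical path; assumes the mercurial.ui module):
#
#   from mercurial import ui as uimod
#   repo = instance(uimod.ui(), '/tmp/repo', create=1)  # create and open
#   assert islocal('/tmp/repo')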