##// END OF EJS Templates
bundle: don't send too many changesets (Issue1704)...
Peter Arrenbrecht -
r9820:0b999aec default
parent child Browse files
Show More
@@ -1,2160 +1,2158
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2, incorporated herein by reference.
6 # GNU General Public License version 2, incorporated herein by reference.
7
7
8 from node import bin, hex, nullid, nullrev, short
8 from node import bin, hex, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import repo, changegroup, subrepo
10 import repo, changegroup, subrepo
11 import changelog, dirstate, filelog, manifest, context
11 import changelog, dirstate, filelog, manifest, context
12 import lock, transaction, store, encoding
12 import lock, transaction, store, encoding
13 import util, extensions, hook, error
13 import util, extensions, hook, error
14 import match as match_
14 import match as match_
15 import merge as merge_
15 import merge as merge_
16 import tags as tags_
16 import tags as tags_
17 from lock import release
17 from lock import release
18 import weakref, stat, errno, os, time, inspect
18 import weakref, stat, errno, os, time, inspect
19 propertycache = util.propertycache
19 propertycache = util.propertycache
20
20
class localrepository(repo.repository):
    """A Mercurial repository on local disk (the ``file:`` scheme)."""
    # wire-protocol capabilities advertised to clients of this repo
    capabilities = set(('lookup', 'changegroupsubset', 'branchmap'))
    # on-disk format requirements this version of the code understands
    supported = set('revlogv1 store fncache shared'.split())
24
24
25 def __init__(self, baseui, path=None, create=0):
25 def __init__(self, baseui, path=None, create=0):
26 repo.repository.__init__(self)
26 repo.repository.__init__(self)
27 self.root = os.path.realpath(path)
27 self.root = os.path.realpath(path)
28 self.path = os.path.join(self.root, ".hg")
28 self.path = os.path.join(self.root, ".hg")
29 self.origroot = path
29 self.origroot = path
30 self.opener = util.opener(self.path)
30 self.opener = util.opener(self.path)
31 self.wopener = util.opener(self.root)
31 self.wopener = util.opener(self.root)
32 self.baseui = baseui
32 self.baseui = baseui
33 self.ui = baseui.copy()
33 self.ui = baseui.copy()
34
34
35 try:
35 try:
36 self.ui.readconfig(self.join("hgrc"), self.root)
36 self.ui.readconfig(self.join("hgrc"), self.root)
37 extensions.loadall(self.ui)
37 extensions.loadall(self.ui)
38 except IOError:
38 except IOError:
39 pass
39 pass
40
40
41 if not os.path.isdir(self.path):
41 if not os.path.isdir(self.path):
42 if create:
42 if create:
43 if not os.path.exists(path):
43 if not os.path.exists(path):
44 os.mkdir(path)
44 os.mkdir(path)
45 os.mkdir(self.path)
45 os.mkdir(self.path)
46 requirements = ["revlogv1"]
46 requirements = ["revlogv1"]
47 if self.ui.configbool('format', 'usestore', True):
47 if self.ui.configbool('format', 'usestore', True):
48 os.mkdir(os.path.join(self.path, "store"))
48 os.mkdir(os.path.join(self.path, "store"))
49 requirements.append("store")
49 requirements.append("store")
50 if self.ui.configbool('format', 'usefncache', True):
50 if self.ui.configbool('format', 'usefncache', True):
51 requirements.append("fncache")
51 requirements.append("fncache")
52 # create an invalid changelog
52 # create an invalid changelog
53 self.opener("00changelog.i", "a").write(
53 self.opener("00changelog.i", "a").write(
54 '\0\0\0\2' # represents revlogv2
54 '\0\0\0\2' # represents revlogv2
55 ' dummy changelog to prevent using the old repo layout'
55 ' dummy changelog to prevent using the old repo layout'
56 )
56 )
57 reqfile = self.opener("requires", "w")
57 reqfile = self.opener("requires", "w")
58 for r in requirements:
58 for r in requirements:
59 reqfile.write("%s\n" % r)
59 reqfile.write("%s\n" % r)
60 reqfile.close()
60 reqfile.close()
61 else:
61 else:
62 raise error.RepoError(_("repository %s not found") % path)
62 raise error.RepoError(_("repository %s not found") % path)
63 elif create:
63 elif create:
64 raise error.RepoError(_("repository %s already exists") % path)
64 raise error.RepoError(_("repository %s already exists") % path)
65 else:
65 else:
66 # find requirements
66 # find requirements
67 requirements = set()
67 requirements = set()
68 try:
68 try:
69 requirements = set(self.opener("requires").read().splitlines())
69 requirements = set(self.opener("requires").read().splitlines())
70 except IOError, inst:
70 except IOError, inst:
71 if inst.errno != errno.ENOENT:
71 if inst.errno != errno.ENOENT:
72 raise
72 raise
73 for r in requirements - self.supported:
73 for r in requirements - self.supported:
74 raise error.RepoError(_("requirement '%s' not supported") % r)
74 raise error.RepoError(_("requirement '%s' not supported") % r)
75
75
76 self.sharedpath = self.path
76 self.sharedpath = self.path
77 try:
77 try:
78 s = os.path.realpath(self.opener("sharedpath").read())
78 s = os.path.realpath(self.opener("sharedpath").read())
79 if not os.path.exists(s):
79 if not os.path.exists(s):
80 raise error.RepoError(
80 raise error.RepoError(
81 _('.hg/sharedpath points to nonexistent directory %s') % s)
81 _('.hg/sharedpath points to nonexistent directory %s') % s)
82 self.sharedpath = s
82 self.sharedpath = s
83 except IOError, inst:
83 except IOError, inst:
84 if inst.errno != errno.ENOENT:
84 if inst.errno != errno.ENOENT:
85 raise
85 raise
86
86
87 self.store = store.store(requirements, self.sharedpath, util.opener)
87 self.store = store.store(requirements, self.sharedpath, util.opener)
88 self.spath = self.store.path
88 self.spath = self.store.path
89 self.sopener = self.store.opener
89 self.sopener = self.store.opener
90 self.sjoin = self.store.join
90 self.sjoin = self.store.join
91 self.opener.createmode = self.store.createmode
91 self.opener.createmode = self.store.createmode
92
92
93 # These two define the set of tags for this repository. _tags
93 # These two define the set of tags for this repository. _tags
94 # maps tag name to node; _tagtypes maps tag name to 'global' or
94 # maps tag name to node; _tagtypes maps tag name to 'global' or
95 # 'local'. (Global tags are defined by .hgtags across all
95 # 'local'. (Global tags are defined by .hgtags across all
96 # heads, and local tags are defined in .hg/localtags.) They
96 # heads, and local tags are defined in .hg/localtags.) They
97 # constitute the in-memory cache of tags.
97 # constitute the in-memory cache of tags.
98 self._tags = None
98 self._tags = None
99 self._tagtypes = None
99 self._tagtypes = None
100
100
101 self._branchcache = None # in UTF-8
101 self._branchcache = None # in UTF-8
102 self._branchcachetip = None
102 self._branchcachetip = None
103 self.nodetagscache = None
103 self.nodetagscache = None
104 self.filterpats = {}
104 self.filterpats = {}
105 self._datafilters = {}
105 self._datafilters = {}
106 self._transref = self._lockref = self._wlockref = None
106 self._transref = self._lockref = self._wlockref = None
107
107
108 @propertycache
108 @propertycache
109 def changelog(self):
109 def changelog(self):
110 c = changelog.changelog(self.sopener)
110 c = changelog.changelog(self.sopener)
111 if 'HG_PENDING' in os.environ:
111 if 'HG_PENDING' in os.environ:
112 p = os.environ['HG_PENDING']
112 p = os.environ['HG_PENDING']
113 if p.startswith(self.root):
113 if p.startswith(self.root):
114 c.readpending('00changelog.i.a')
114 c.readpending('00changelog.i.a')
115 self.sopener.defversion = c.version
115 self.sopener.defversion = c.version
116 return c
116 return c
117
117
118 @propertycache
118 @propertycache
119 def manifest(self):
119 def manifest(self):
120 return manifest.manifest(self.sopener)
120 return manifest.manifest(self.sopener)
121
121
122 @propertycache
122 @propertycache
123 def dirstate(self):
123 def dirstate(self):
124 return dirstate.dirstate(self.opener, self.ui, self.root)
124 return dirstate.dirstate(self.opener, self.ui, self.root)
125
125
126 def __getitem__(self, changeid):
126 def __getitem__(self, changeid):
127 if changeid is None:
127 if changeid is None:
128 return context.workingctx(self)
128 return context.workingctx(self)
129 return context.changectx(self, changeid)
129 return context.changectx(self, changeid)
130
130
131 def __nonzero__(self):
131 def __nonzero__(self):
132 return True
132 return True
133
133
134 def __len__(self):
134 def __len__(self):
135 return len(self.changelog)
135 return len(self.changelog)
136
136
137 def __iter__(self):
137 def __iter__(self):
138 for i in xrange(len(self)):
138 for i in xrange(len(self)):
139 yield i
139 yield i
140
140
141 def url(self):
141 def url(self):
142 return 'file:' + self.root
142 return 'file:' + self.root
143
143
144 def hook(self, name, throw=False, **args):
144 def hook(self, name, throw=False, **args):
145 return hook.hook(self.ui, self, name, throw, **args)
145 return hook.hook(self.ui, self, name, throw, **args)
146
146
147 tag_disallowed = ':\r\n'
147 tag_disallowed = ':\r\n'
148
148
149 def _tag(self, names, node, message, local, user, date, extra={}):
149 def _tag(self, names, node, message, local, user, date, extra={}):
150 if isinstance(names, str):
150 if isinstance(names, str):
151 allchars = names
151 allchars = names
152 names = (names,)
152 names = (names,)
153 else:
153 else:
154 allchars = ''.join(names)
154 allchars = ''.join(names)
155 for c in self.tag_disallowed:
155 for c in self.tag_disallowed:
156 if c in allchars:
156 if c in allchars:
157 raise util.Abort(_('%r cannot be used in a tag name') % c)
157 raise util.Abort(_('%r cannot be used in a tag name') % c)
158
158
159 for name in names:
159 for name in names:
160 self.hook('pretag', throw=True, node=hex(node), tag=name,
160 self.hook('pretag', throw=True, node=hex(node), tag=name,
161 local=local)
161 local=local)
162
162
163 def writetags(fp, names, munge, prevtags):
163 def writetags(fp, names, munge, prevtags):
164 fp.seek(0, 2)
164 fp.seek(0, 2)
165 if prevtags and prevtags[-1] != '\n':
165 if prevtags and prevtags[-1] != '\n':
166 fp.write('\n')
166 fp.write('\n')
167 for name in names:
167 for name in names:
168 m = munge and munge(name) or name
168 m = munge and munge(name) or name
169 if self._tagtypes and name in self._tagtypes:
169 if self._tagtypes and name in self._tagtypes:
170 old = self._tags.get(name, nullid)
170 old = self._tags.get(name, nullid)
171 fp.write('%s %s\n' % (hex(old), m))
171 fp.write('%s %s\n' % (hex(old), m))
172 fp.write('%s %s\n' % (hex(node), m))
172 fp.write('%s %s\n' % (hex(node), m))
173 fp.close()
173 fp.close()
174
174
175 prevtags = ''
175 prevtags = ''
176 if local:
176 if local:
177 try:
177 try:
178 fp = self.opener('localtags', 'r+')
178 fp = self.opener('localtags', 'r+')
179 except IOError:
179 except IOError:
180 fp = self.opener('localtags', 'a')
180 fp = self.opener('localtags', 'a')
181 else:
181 else:
182 prevtags = fp.read()
182 prevtags = fp.read()
183
183
184 # local tags are stored in the current charset
184 # local tags are stored in the current charset
185 writetags(fp, names, None, prevtags)
185 writetags(fp, names, None, prevtags)
186 for name in names:
186 for name in names:
187 self.hook('tag', node=hex(node), tag=name, local=local)
187 self.hook('tag', node=hex(node), tag=name, local=local)
188 return
188 return
189
189
190 try:
190 try:
191 fp = self.wfile('.hgtags', 'rb+')
191 fp = self.wfile('.hgtags', 'rb+')
192 except IOError:
192 except IOError:
193 fp = self.wfile('.hgtags', 'ab')
193 fp = self.wfile('.hgtags', 'ab')
194 else:
194 else:
195 prevtags = fp.read()
195 prevtags = fp.read()
196
196
197 # committed tags are stored in UTF-8
197 # committed tags are stored in UTF-8
198 writetags(fp, names, encoding.fromlocal, prevtags)
198 writetags(fp, names, encoding.fromlocal, prevtags)
199
199
200 if '.hgtags' not in self.dirstate:
200 if '.hgtags' not in self.dirstate:
201 self.add(['.hgtags'])
201 self.add(['.hgtags'])
202
202
203 m = match_.exact(self.root, '', ['.hgtags'])
203 m = match_.exact(self.root, '', ['.hgtags'])
204 tagnode = self.commit(message, user, date, extra=extra, match=m)
204 tagnode = self.commit(message, user, date, extra=extra, match=m)
205
205
206 for name in names:
206 for name in names:
207 self.hook('tag', node=hex(node), tag=name, local=local)
207 self.hook('tag', node=hex(node), tag=name, local=local)
208
208
209 return tagnode
209 return tagnode
210
210
211 def tag(self, names, node, message, local, user, date):
211 def tag(self, names, node, message, local, user, date):
212 '''tag a revision with one or more symbolic names.
212 '''tag a revision with one or more symbolic names.
213
213
214 names is a list of strings or, when adding a single tag, names may be a
214 names is a list of strings or, when adding a single tag, names may be a
215 string.
215 string.
216
216
217 if local is True, the tags are stored in a per-repository file.
217 if local is True, the tags are stored in a per-repository file.
218 otherwise, they are stored in the .hgtags file, and a new
218 otherwise, they are stored in the .hgtags file, and a new
219 changeset is committed with the change.
219 changeset is committed with the change.
220
220
221 keyword arguments:
221 keyword arguments:
222
222
223 local: whether to store tags in non-version-controlled file
223 local: whether to store tags in non-version-controlled file
224 (default False)
224 (default False)
225
225
226 message: commit message to use if committing
226 message: commit message to use if committing
227
227
228 user: name of user to use if committing
228 user: name of user to use if committing
229
229
230 date: date tuple to use if committing'''
230 date: date tuple to use if committing'''
231
231
232 for x in self.status()[:5]:
232 for x in self.status()[:5]:
233 if '.hgtags' in x:
233 if '.hgtags' in x:
234 raise util.Abort(_('working copy of .hgtags is changed '
234 raise util.Abort(_('working copy of .hgtags is changed '
235 '(please commit .hgtags manually)'))
235 '(please commit .hgtags manually)'))
236
236
237 self.tags() # instantiate the cache
237 self.tags() # instantiate the cache
238 self._tag(names, node, message, local, user, date)
238 self._tag(names, node, message, local, user, date)
239
239
240 def tags(self):
240 def tags(self):
241 '''return a mapping of tag to node'''
241 '''return a mapping of tag to node'''
242 if self._tags is None:
242 if self._tags is None:
243 (self._tags, self._tagtypes) = self._findtags()
243 (self._tags, self._tagtypes) = self._findtags()
244
244
245 return self._tags
245 return self._tags
246
246
247 def _findtags(self):
247 def _findtags(self):
248 '''Do the hard work of finding tags. Return a pair of dicts
248 '''Do the hard work of finding tags. Return a pair of dicts
249 (tags, tagtypes) where tags maps tag name to node, and tagtypes
249 (tags, tagtypes) where tags maps tag name to node, and tagtypes
250 maps tag name to a string like \'global\' or \'local\'.
250 maps tag name to a string like \'global\' or \'local\'.
251 Subclasses or extensions are free to add their own tags, but
251 Subclasses or extensions are free to add their own tags, but
252 should be aware that the returned dicts will be retained for the
252 should be aware that the returned dicts will be retained for the
253 duration of the localrepo object.'''
253 duration of the localrepo object.'''
254
254
255 # XXX what tagtype should subclasses/extensions use? Currently
255 # XXX what tagtype should subclasses/extensions use? Currently
256 # mq and bookmarks add tags, but do not set the tagtype at all.
256 # mq and bookmarks add tags, but do not set the tagtype at all.
257 # Should each extension invent its own tag type? Should there
257 # Should each extension invent its own tag type? Should there
258 # be one tagtype for all such "virtual" tags? Or is the status
258 # be one tagtype for all such "virtual" tags? Or is the status
259 # quo fine?
259 # quo fine?
260
260
261 alltags = {} # map tag name to (node, hist)
261 alltags = {} # map tag name to (node, hist)
262 tagtypes = {}
262 tagtypes = {}
263
263
264 tags_.findglobaltags(self.ui, self, alltags, tagtypes)
264 tags_.findglobaltags(self.ui, self, alltags, tagtypes)
265 tags_.readlocaltags(self.ui, self, alltags, tagtypes)
265 tags_.readlocaltags(self.ui, self, alltags, tagtypes)
266
266
267 # Build the return dicts. Have to re-encode tag names because
267 # Build the return dicts. Have to re-encode tag names because
268 # the tags module always uses UTF-8 (in order not to lose info
268 # the tags module always uses UTF-8 (in order not to lose info
269 # writing to the cache), but the rest of Mercurial wants them in
269 # writing to the cache), but the rest of Mercurial wants them in
270 # local encoding.
270 # local encoding.
271 tags = {}
271 tags = {}
272 for (name, (node, hist)) in alltags.iteritems():
272 for (name, (node, hist)) in alltags.iteritems():
273 if node != nullid:
273 if node != nullid:
274 tags[encoding.tolocal(name)] = node
274 tags[encoding.tolocal(name)] = node
275 tags['tip'] = self.changelog.tip()
275 tags['tip'] = self.changelog.tip()
276 tagtypes = dict([(encoding.tolocal(name), value)
276 tagtypes = dict([(encoding.tolocal(name), value)
277 for (name, value) in tagtypes.iteritems()])
277 for (name, value) in tagtypes.iteritems()])
278 return (tags, tagtypes)
278 return (tags, tagtypes)
279
279
280 def tagtype(self, tagname):
280 def tagtype(self, tagname):
281 '''
281 '''
282 return the type of the given tag. result can be:
282 return the type of the given tag. result can be:
283
283
284 'local' : a local tag
284 'local' : a local tag
285 'global' : a global tag
285 'global' : a global tag
286 None : tag does not exist
286 None : tag does not exist
287 '''
287 '''
288
288
289 self.tags()
289 self.tags()
290
290
291 return self._tagtypes.get(tagname)
291 return self._tagtypes.get(tagname)
292
292
293 def tagslist(self):
293 def tagslist(self):
294 '''return a list of tags ordered by revision'''
294 '''return a list of tags ordered by revision'''
295 l = []
295 l = []
296 for t, n in self.tags().iteritems():
296 for t, n in self.tags().iteritems():
297 try:
297 try:
298 r = self.changelog.rev(n)
298 r = self.changelog.rev(n)
299 except:
299 except:
300 r = -2 # sort to the beginning of the list if unknown
300 r = -2 # sort to the beginning of the list if unknown
301 l.append((r, t, n))
301 l.append((r, t, n))
302 return [(t, n) for r, t, n in sorted(l)]
302 return [(t, n) for r, t, n in sorted(l)]
303
303
304 def nodetags(self, node):
304 def nodetags(self, node):
305 '''return the tags associated with a node'''
305 '''return the tags associated with a node'''
306 if not self.nodetagscache:
306 if not self.nodetagscache:
307 self.nodetagscache = {}
307 self.nodetagscache = {}
308 for t, n in self.tags().iteritems():
308 for t, n in self.tags().iteritems():
309 self.nodetagscache.setdefault(n, []).append(t)
309 self.nodetagscache.setdefault(n, []).append(t)
310 return self.nodetagscache.get(node, [])
310 return self.nodetagscache.get(node, [])
311
311
312 def _branchtags(self, partial, lrev):
312 def _branchtags(self, partial, lrev):
313 # TODO: rename this function?
313 # TODO: rename this function?
314 tiprev = len(self) - 1
314 tiprev = len(self) - 1
315 if lrev != tiprev:
315 if lrev != tiprev:
316 self._updatebranchcache(partial, lrev+1, tiprev+1)
316 self._updatebranchcache(partial, lrev+1, tiprev+1)
317 self._writebranchcache(partial, self.changelog.tip(), tiprev)
317 self._writebranchcache(partial, self.changelog.tip(), tiprev)
318
318
319 return partial
319 return partial
320
320
321 def branchmap(self):
321 def branchmap(self):
322 tip = self.changelog.tip()
322 tip = self.changelog.tip()
323 if self._branchcache is not None and self._branchcachetip == tip:
323 if self._branchcache is not None and self._branchcachetip == tip:
324 return self._branchcache
324 return self._branchcache
325
325
326 oldtip = self._branchcachetip
326 oldtip = self._branchcachetip
327 self._branchcachetip = tip
327 self._branchcachetip = tip
328 if oldtip is None or oldtip not in self.changelog.nodemap:
328 if oldtip is None or oldtip not in self.changelog.nodemap:
329 partial, last, lrev = self._readbranchcache()
329 partial, last, lrev = self._readbranchcache()
330 else:
330 else:
331 lrev = self.changelog.rev(oldtip)
331 lrev = self.changelog.rev(oldtip)
332 partial = self._branchcache
332 partial = self._branchcache
333
333
334 self._branchtags(partial, lrev)
334 self._branchtags(partial, lrev)
335 # this private cache holds all heads (not just tips)
335 # this private cache holds all heads (not just tips)
336 self._branchcache = partial
336 self._branchcache = partial
337
337
338 return self._branchcache
338 return self._branchcache
339
339
340 def branchtags(self):
340 def branchtags(self):
341 '''return a dict where branch names map to the tipmost head of
341 '''return a dict where branch names map to the tipmost head of
342 the branch, open heads come before closed'''
342 the branch, open heads come before closed'''
343 bt = {}
343 bt = {}
344 for bn, heads in self.branchmap().iteritems():
344 for bn, heads in self.branchmap().iteritems():
345 head = None
345 head = None
346 for i in range(len(heads)-1, -1, -1):
346 for i in range(len(heads)-1, -1, -1):
347 h = heads[i]
347 h = heads[i]
348 if 'close' not in self.changelog.read(h)[5]:
348 if 'close' not in self.changelog.read(h)[5]:
349 head = h
349 head = h
350 break
350 break
351 # no open heads were found
351 # no open heads were found
352 if head is None:
352 if head is None:
353 head = heads[-1]
353 head = heads[-1]
354 bt[bn] = head
354 bt[bn] = head
355 return bt
355 return bt
356
356
357
357
358 def _readbranchcache(self):
358 def _readbranchcache(self):
359 partial = {}
359 partial = {}
360 try:
360 try:
361 f = self.opener("branchheads.cache")
361 f = self.opener("branchheads.cache")
362 lines = f.read().split('\n')
362 lines = f.read().split('\n')
363 f.close()
363 f.close()
364 except (IOError, OSError):
364 except (IOError, OSError):
365 return {}, nullid, nullrev
365 return {}, nullid, nullrev
366
366
367 try:
367 try:
368 last, lrev = lines.pop(0).split(" ", 1)
368 last, lrev = lines.pop(0).split(" ", 1)
369 last, lrev = bin(last), int(lrev)
369 last, lrev = bin(last), int(lrev)
370 if lrev >= len(self) or self[lrev].node() != last:
370 if lrev >= len(self) or self[lrev].node() != last:
371 # invalidate the cache
371 # invalidate the cache
372 raise ValueError('invalidating branch cache (tip differs)')
372 raise ValueError('invalidating branch cache (tip differs)')
373 for l in lines:
373 for l in lines:
374 if not l: continue
374 if not l: continue
375 node, label = l.split(" ", 1)
375 node, label = l.split(" ", 1)
376 partial.setdefault(label.strip(), []).append(bin(node))
376 partial.setdefault(label.strip(), []).append(bin(node))
377 except KeyboardInterrupt:
377 except KeyboardInterrupt:
378 raise
378 raise
379 except Exception, inst:
379 except Exception, inst:
380 if self.ui.debugflag:
380 if self.ui.debugflag:
381 self.ui.warn(str(inst), '\n')
381 self.ui.warn(str(inst), '\n')
382 partial, last, lrev = {}, nullid, nullrev
382 partial, last, lrev = {}, nullid, nullrev
383 return partial, last, lrev
383 return partial, last, lrev
384
384
385 def _writebranchcache(self, branches, tip, tiprev):
385 def _writebranchcache(self, branches, tip, tiprev):
386 try:
386 try:
387 f = self.opener("branchheads.cache", "w", atomictemp=True)
387 f = self.opener("branchheads.cache", "w", atomictemp=True)
388 f.write("%s %s\n" % (hex(tip), tiprev))
388 f.write("%s %s\n" % (hex(tip), tiprev))
389 for label, nodes in branches.iteritems():
389 for label, nodes in branches.iteritems():
390 for node in nodes:
390 for node in nodes:
391 f.write("%s %s\n" % (hex(node), label))
391 f.write("%s %s\n" % (hex(node), label))
392 f.rename()
392 f.rename()
393 except (IOError, OSError):
393 except (IOError, OSError):
394 pass
394 pass
395
395
396 def _updatebranchcache(self, partial, start, end):
396 def _updatebranchcache(self, partial, start, end):
397 # collect new branch entries
397 # collect new branch entries
398 newbranches = {}
398 newbranches = {}
399 for r in xrange(start, end):
399 for r in xrange(start, end):
400 c = self[r]
400 c = self[r]
401 newbranches.setdefault(c.branch(), []).append(c.node())
401 newbranches.setdefault(c.branch(), []).append(c.node())
402 # if older branchheads are reachable from new ones, they aren't
402 # if older branchheads are reachable from new ones, they aren't
403 # really branchheads. Note checking parents is insufficient:
403 # really branchheads. Note checking parents is insufficient:
404 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
404 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
405 for branch, newnodes in newbranches.iteritems():
405 for branch, newnodes in newbranches.iteritems():
406 bheads = partial.setdefault(branch, [])
406 bheads = partial.setdefault(branch, [])
407 bheads.extend(newnodes)
407 bheads.extend(newnodes)
408 if len(bheads) < 2:
408 if len(bheads) < 2:
409 continue
409 continue
410 newbheads = []
410 newbheads = []
411 # starting from tip means fewer passes over reachable
411 # starting from tip means fewer passes over reachable
412 while newnodes:
412 while newnodes:
413 latest = newnodes.pop()
413 latest = newnodes.pop()
414 if latest not in bheads:
414 if latest not in bheads:
415 continue
415 continue
416 minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
416 minbhrev = self[min([self[bh].rev() for bh in bheads])].node()
417 reachable = self.changelog.reachable(latest, minbhrev)
417 reachable = self.changelog.reachable(latest, minbhrev)
418 bheads = [b for b in bheads if b not in reachable]
418 bheads = [b for b in bheads if b not in reachable]
419 newbheads.insert(0, latest)
419 newbheads.insert(0, latest)
420 bheads.extend(newbheads)
420 bheads.extend(newbheads)
421 partial[branch] = bheads
421 partial[branch] = bheads
422
422
423 def lookup(self, key):
423 def lookup(self, key):
424 if isinstance(key, int):
424 if isinstance(key, int):
425 return self.changelog.node(key)
425 return self.changelog.node(key)
426 elif key == '.':
426 elif key == '.':
427 return self.dirstate.parents()[0]
427 return self.dirstate.parents()[0]
428 elif key == 'null':
428 elif key == 'null':
429 return nullid
429 return nullid
430 elif key == 'tip':
430 elif key == 'tip':
431 return self.changelog.tip()
431 return self.changelog.tip()
432 n = self.changelog._match(key)
432 n = self.changelog._match(key)
433 if n:
433 if n:
434 return n
434 return n
435 if key in self.tags():
435 if key in self.tags():
436 return self.tags()[key]
436 return self.tags()[key]
437 if key in self.branchtags():
437 if key in self.branchtags():
438 return self.branchtags()[key]
438 return self.branchtags()[key]
439 n = self.changelog._partialmatch(key)
439 n = self.changelog._partialmatch(key)
440 if n:
440 if n:
441 return n
441 return n
442
442
443 # can't find key, check if it might have come from damaged dirstate
443 # can't find key, check if it might have come from damaged dirstate
444 if key in self.dirstate.parents():
444 if key in self.dirstate.parents():
445 raise error.Abort(_("working directory has unknown parent '%s'!")
445 raise error.Abort(_("working directory has unknown parent '%s'!")
446 % short(key))
446 % short(key))
447 try:
447 try:
448 if len(key) == 20:
448 if len(key) == 20:
449 key = hex(key)
449 key = hex(key)
450 except:
450 except:
451 pass
451 pass
452 raise error.RepoLookupError(_("unknown revision '%s'") % key)
452 raise error.RepoLookupError(_("unknown revision '%s'") % key)
453
453
454 def local(self):
454 def local(self):
455 return True
455 return True
456
456
457 def join(self, f):
457 def join(self, f):
458 return os.path.join(self.path, f)
458 return os.path.join(self.path, f)
459
459
460 def wjoin(self, f):
460 def wjoin(self, f):
461 return os.path.join(self.root, f)
461 return os.path.join(self.root, f)
462
462
463 def rjoin(self, f):
463 def rjoin(self, f):
464 return os.path.join(self.root, util.pconvert(f))
464 return os.path.join(self.root, util.pconvert(f))
465
465
466 def file(self, f):
466 def file(self, f):
467 if f[0] == '/':
467 if f[0] == '/':
468 f = f[1:]
468 f = f[1:]
469 return filelog.filelog(self.sopener, f)
469 return filelog.filelog(self.sopener, f)
470
470
471 def changectx(self, changeid):
471 def changectx(self, changeid):
472 return self[changeid]
472 return self[changeid]
473
473
474 def parents(self, changeid=None):
474 def parents(self, changeid=None):
475 '''get list of changectxs for parents of changeid'''
475 '''get list of changectxs for parents of changeid'''
476 return self[changeid].parents()
476 return self[changeid].parents()
477
477
478 def filectx(self, path, changeid=None, fileid=None):
478 def filectx(self, path, changeid=None, fileid=None):
479 """changeid can be a changeset revision, node, or tag.
479 """changeid can be a changeset revision, node, or tag.
480 fileid can be a file revision or node."""
480 fileid can be a file revision or node."""
481 return context.filectx(self, path, changeid, fileid)
481 return context.filectx(self, path, changeid, fileid)
482
482
483 def getcwd(self):
483 def getcwd(self):
484 return self.dirstate.getcwd()
484 return self.dirstate.getcwd()
485
485
486 def pathto(self, f, cwd=None):
486 def pathto(self, f, cwd=None):
487 return self.dirstate.pathto(f, cwd)
487 return self.dirstate.pathto(f, cwd)
488
488
489 def wfile(self, f, mode='r'):
489 def wfile(self, f, mode='r'):
490 return self.wopener(f, mode)
490 return self.wopener(f, mode)
491
491
492 def _link(self, f):
492 def _link(self, f):
493 return os.path.islink(self.wjoin(f))
493 return os.path.islink(self.wjoin(f))
494
494
495 def _filter(self, filter, filename, data):
495 def _filter(self, filter, filename, data):
496 if filter not in self.filterpats:
496 if filter not in self.filterpats:
497 l = []
497 l = []
498 for pat, cmd in self.ui.configitems(filter):
498 for pat, cmd in self.ui.configitems(filter):
499 if cmd == '!':
499 if cmd == '!':
500 continue
500 continue
501 mf = match_.match(self.root, '', [pat])
501 mf = match_.match(self.root, '', [pat])
502 fn = None
502 fn = None
503 params = cmd
503 params = cmd
504 for name, filterfn in self._datafilters.iteritems():
504 for name, filterfn in self._datafilters.iteritems():
505 if cmd.startswith(name):
505 if cmd.startswith(name):
506 fn = filterfn
506 fn = filterfn
507 params = cmd[len(name):].lstrip()
507 params = cmd[len(name):].lstrip()
508 break
508 break
509 if not fn:
509 if not fn:
510 fn = lambda s, c, **kwargs: util.filter(s, c)
510 fn = lambda s, c, **kwargs: util.filter(s, c)
511 # Wrap old filters not supporting keyword arguments
511 # Wrap old filters not supporting keyword arguments
512 if not inspect.getargspec(fn)[2]:
512 if not inspect.getargspec(fn)[2]:
513 oldfn = fn
513 oldfn = fn
514 fn = lambda s, c, **kwargs: oldfn(s, c)
514 fn = lambda s, c, **kwargs: oldfn(s, c)
515 l.append((mf, fn, params))
515 l.append((mf, fn, params))
516 self.filterpats[filter] = l
516 self.filterpats[filter] = l
517
517
518 for mf, fn, cmd in self.filterpats[filter]:
518 for mf, fn, cmd in self.filterpats[filter]:
519 if mf(filename):
519 if mf(filename):
520 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
520 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
521 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
521 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
522 break
522 break
523
523
524 return data
524 return data
525
525
526 def adddatafilter(self, name, filter):
526 def adddatafilter(self, name, filter):
527 self._datafilters[name] = filter
527 self._datafilters[name] = filter
528
528
529 def wread(self, filename):
529 def wread(self, filename):
530 if self._link(filename):
530 if self._link(filename):
531 data = os.readlink(self.wjoin(filename))
531 data = os.readlink(self.wjoin(filename))
532 else:
532 else:
533 data = self.wopener(filename, 'r').read()
533 data = self.wopener(filename, 'r').read()
534 return self._filter("encode", filename, data)
534 return self._filter("encode", filename, data)
535
535
536 def wwrite(self, filename, data, flags):
536 def wwrite(self, filename, data, flags):
537 data = self._filter("decode", filename, data)
537 data = self._filter("decode", filename, data)
538 try:
538 try:
539 os.unlink(self.wjoin(filename))
539 os.unlink(self.wjoin(filename))
540 except OSError:
540 except OSError:
541 pass
541 pass
542 if 'l' in flags:
542 if 'l' in flags:
543 self.wopener.symlink(data, filename)
543 self.wopener.symlink(data, filename)
544 else:
544 else:
545 self.wopener(filename, 'w').write(data)
545 self.wopener(filename, 'w').write(data)
546 if 'x' in flags:
546 if 'x' in flags:
547 util.set_flags(self.wjoin(filename), False, True)
547 util.set_flags(self.wjoin(filename), False, True)
548
548
549 def wwritedata(self, filename, data):
549 def wwritedata(self, filename, data):
550 return self._filter("decode", filename, data)
550 return self._filter("decode", filename, data)
551
551
    def transaction(self):
        """Open a new store transaction, or nest into the running one.

        Returns a transaction object. Only a weak reference is kept, so
        the caller is responsible for holding the transaction alive.
        Raises RepoError if an abandoned journal is found on disk.
        """
        # join an already-running transaction instead of opening a new one
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(_("abandoned transaction found - run hg recover"))

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            # no dirstate yet (e.g. fresh repository): record it as empty
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)
        self.opener("journal.branch", "w").write(self.dirstate.branch())

        # on successful close, the journal.* files are renamed to undo.*
        # so the transaction can later be rolled back
        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate")),
                   (self.join("journal.branch"), self.join("undo.branch"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr
578
578
579 def recover(self):
579 def recover(self):
580 lock = self.lock()
580 lock = self.lock()
581 try:
581 try:
582 if os.path.exists(self.sjoin("journal")):
582 if os.path.exists(self.sjoin("journal")):
583 self.ui.status(_("rolling back interrupted transaction\n"))
583 self.ui.status(_("rolling back interrupted transaction\n"))
584 transaction.rollback(self.sopener, self.sjoin("journal"), self.ui.warn)
584 transaction.rollback(self.sopener, self.sjoin("journal"), self.ui.warn)
585 self.invalidate()
585 self.invalidate()
586 return True
586 return True
587 else:
587 else:
588 self.ui.warn(_("no interrupted transaction available\n"))
588 self.ui.warn(_("no interrupted transaction available\n"))
589 return False
589 return False
590 finally:
590 finally:
591 lock.release()
591 lock.release()
592
592
    def rollback(self):
        """Undo the last transaction, restoring dirstate and branch.

        Takes both the wlock and the store lock, since it rewrites
        history as well as working-directory metadata. Warns (rather
        than failing) when no undo information is available.
        """
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                self.ui.status(_("rolling back last transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("undo"), self.ui.warn)
                util.rename(self.join("undo.dirstate"), self.join("dirstate"))
                try:
                    branch = self.opener("undo.branch").read()
                    self.dirstate.setbranch(branch)
                except IOError:
                    # undo.branch is missing: keep the current branch
                    self.ui.warn(_("Named branch could not be reset, "
                                   "current branch still is: %s\n")
                                 % encoding.tolocal(self.dirstate.branch()))
                # drop all cached state and notify that history was destroyed
                self.invalidate()
                self.dirstate.invalidate()
                self.destroyed()
            else:
                self.ui.warn(_("no rollback information available\n"))
        finally:
            release(lock, wlock)
616
616
617 def invalidate(self):
617 def invalidate(self):
618 for a in "changelog manifest".split():
618 for a in "changelog manifest".split():
619 if a in self.__dict__:
619 if a in self.__dict__:
620 delattr(self, a)
620 delattr(self, a)
621 self._tags = None
621 self._tags = None
622 self._tagtypes = None
622 self._tagtypes = None
623 self.nodetagscache = None
623 self.nodetagscache = None
624 self._branchcache = None # in UTF-8
624 self._branchcache = None # in UTF-8
625 self._branchcachetip = None
625 self._branchcachetip = None
626
626
    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        """Acquire the lock file *lockname*.

        First tries a non-blocking acquire. If the lock is already held
        and wait is true, warn the user and retry with a timeout taken
        from the ui.timeout config (default 600 seconds); otherwise the
        LockHeld exception propagates. acquirefn, if given, runs once
        the lock is obtained. Returns the lock object.
        """
        try:
            # timeout 0: fail immediately if someone else holds the lock
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
641
641
642 def lock(self, wait=True):
642 def lock(self, wait=True):
643 '''Lock the repository store (.hg/store) and return a weak reference
643 '''Lock the repository store (.hg/store) and return a weak reference
644 to the lock. Use this before modifying the store (e.g. committing or
644 to the lock. Use this before modifying the store (e.g. committing or
645 stripping). If you are opening a transaction, get a lock as well.)'''
645 stripping). If you are opening a transaction, get a lock as well.)'''
646 l = self._lockref and self._lockref()
646 l = self._lockref and self._lockref()
647 if l is not None and l.held:
647 if l is not None and l.held:
648 l.lock()
648 l.lock()
649 return l
649 return l
650
650
651 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
651 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
652 _('repository %s') % self.origroot)
652 _('repository %s') % self.origroot)
653 self._lockref = weakref.ref(l)
653 self._lockref = weakref.ref(l)
654 return l
654 return l
655
655
656 def wlock(self, wait=True):
656 def wlock(self, wait=True):
657 '''Lock the non-store parts of the repository (everything under
657 '''Lock the non-store parts of the repository (everything under
658 .hg except .hg/store) and return a weak reference to the lock.
658 .hg except .hg/store) and return a weak reference to the lock.
659 Use this before modifying files in .hg.'''
659 Use this before modifying files in .hg.'''
660 l = self._wlockref and self._wlockref()
660 l = self._wlockref and self._wlockref()
661 if l is not None and l.held:
661 if l is not None and l.held:
662 l.lock()
662 l.lock()
663 return l
663 return l
664
664
665 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
665 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
666 self.dirstate.invalidate, _('working directory of %s') %
666 self.dirstate.invalidate, _('working directory of %s') %
667 self.origroot)
667 self.origroot)
668 self._wlockref = weakref.ref(l)
668 self._wlockref = weakref.ref(l)
669 return l
669 return l
670
670
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction

        fctx is the file context to commit; manifest1/manifest2 are the
        parents' manifests; tr is the open transaction. Appends fname to
        changelist when the file actually changed. Returns the filelog
        node to record in the new manifest.
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self['.'].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
            meta["copy"] = cfname
            meta["copyrev"] = hex(crev)
            fparent1, fparent2 = nullid, newfparent
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            # if so, drop the redundant parent so the filelog stays linear
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        # content is unchanged, so reuse fparent1 without a new filelog rev
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
745
745
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.

        Returns the new changeset node, or None when there is nothing
        to commit. Raises util.Abort on partial merge commits, missing
        or untracked explicit files, and unresolved merge conflicts.

        NOTE(review): *extra* uses a mutable default dict; looks like it
        is only read here, but confirm callees do not mutate it.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = match_.always(self.root, '')

        if not force:
            # record visited directories so explicit dir patterns can be
            # validated below; unmatched explicit files abort the commit
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            p1, p2 = self.dirstate.parents()
            wctx = self[None]

            if (not force and p2 != nullid and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            for s in wctx.substate:
                if match(s) and wctx.sub(s).dirty():
                    subs.append(s)
            if subs and '.hgsubstate' not in changes[0]:
                changes[0].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            # nothing to commit: no changes, same branch, not closing
            if (not force and not extra.get("close") and p2 == nullid
                and not (changes[0] or changes[1] or changes[2])
                and self[None].branch() == self['.'].branch()):
                return None

            ms = merge_.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg resolve)"))

            cctx = context.workingctx(self, (p1, p2), text, user, date,
                                      extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)

            # commit subs
            if subs:
                state = wctx.substate.copy()
                for s in subs:
                    self.ui.status(_('committing subrepository %s\n') % s)
                    sr = wctx.sub(s).commit(cctx._text, user, date)
                    state[s] = (state[s][0], sr)
                subrepo.writestate(self, state)

            ret = self.commitctx(cctx, True)

            # update dirstate and mergestate
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.forget(f)
            self.dirstate.setparents(ret)
            ms.reset()

            return ret

        finally:
            wlock.release()
846
846
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.

        Revision information is passed via the context argument.

        When error is true, OSError/IOError while committing a file is
        fatal; otherwise the file is treated as removed. Fires the
        precommit, pretxncommit and commit hooks. Returns the new
        changelog node.
        """

        tr = lock = None
        removed = ctx.removed()
        p1, p2 = ctx.p1(), ctx.p2()
        m1 = p1.manifest().copy()
        m2 = p2.manifest()
        user = ctx.user()

        xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
        self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

        lock = self.lock()
        try:
            tr = self.transaction()
            # proxy so filelogs cannot accidentally keep the transaction alive
            trp = weakref.proxy(tr)

            # check in files
            new = {}
            changed = []
            linkrev = len(self)
            for f in sorted(ctx.modified() + ctx.added()):
                self.ui.note(f + "\n")
                try:
                    fctx = ctx[f]
                    new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                              changed)
                    m1.set(f, fctx.flags())
                except (OSError, IOError):
                    if error:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    else:
                        removed.append(f)

            # update manifest
            m1.update(new)
            # only report files removed that a parent actually tracked
            removed = [f for f in sorted(removed) if f in m1 or f in m2]
            drop = [f for f in removed if f in m1]
            for f in drop:
                del m1[f]
            mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                   p2.manifestnode(), (new, drop))

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, changed + removed, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            # lazily flush pending changelog writes only if a hook needs them
            p = lambda: self.changelog.writepending() and self.root or ""
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            tr.close()

            if self._branchcache:
                self.branchtags()

            self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
            return n
        finally:
            # drop our reference before releasing the lock so an aborted
            # transaction is cleaned up first
            del tr
            lock.release()
914
914
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.'''
        # XXX it might be nice if we could take the list of destroyed
        # nodes, but I don't see an easy way for rollback() to do that

        # Ensure the persistent tag cache is updated.  Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback.  That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        # empty dicts: we only care about the cache-refresh side effect
        tags_.findglobaltags(self.ui, self, {}, {})
933
933
934 def walk(self, match, node=None):
934 def walk(self, match, node=None):
935 '''
935 '''
936 walk recursively through the directory tree or a given
936 walk recursively through the directory tree or a given
937 changeset, finding all files matched by the match
937 changeset, finding all files matched by the match
938 function
938 function
939 '''
939 '''
940 return self[node].walk(match)
940 return self[node].walk(match)
941
941
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns a 7-tuple of sorted lists:
        (modified, added, removed, deleted, unknown, ignored, clean).
        deleted/unknown/ignored are only populated when comparing
        against the working directory.
        """

        def mfmatches(ctx):
            # manifest restricted to the files accepted by match
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or match_.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                # only warn for files that exist in neither context
                if f not in ctx1:
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            s = self.dirstate.status(match, listignored, listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f].data())):
                        modified.append(f)
                    else:
                        fixup.append(f)

                if listclean:
                    clean += fixup

                # update dirstate for files that are actually clean
                if fixup:
                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    # flags differ, or node differs with a real content change
                    # (None node in mf2 means "working dir, compare contents")
                    if (mf1.flags(fn) != mf2.flags(fn) or
                        (mf1[fn] != mf2[fn] and
                         (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)
            # whatever is left in mf1 was not seen in mf2: removed
            removed = mf1.keys()

        r = modified, added, removed, deleted, unknown, ignored, clean
        [l.sort() for l in r]
        return r
1047
1047
1048 def add(self, list):
1048 def add(self, list):
1049 wlock = self.wlock()
1049 wlock = self.wlock()
1050 try:
1050 try:
1051 rejected = []
1051 rejected = []
1052 for f in list:
1052 for f in list:
1053 p = self.wjoin(f)
1053 p = self.wjoin(f)
1054 try:
1054 try:
1055 st = os.lstat(p)
1055 st = os.lstat(p)
1056 except:
1056 except:
1057 self.ui.warn(_("%s does not exist!\n") % f)
1057 self.ui.warn(_("%s does not exist!\n") % f)
1058 rejected.append(f)
1058 rejected.append(f)
1059 continue
1059 continue
1060 if st.st_size > 10000000:
1060 if st.st_size > 10000000:
1061 self.ui.warn(_("%s: files over 10MB may cause memory and"
1061 self.ui.warn(_("%s: files over 10MB may cause memory and"
1062 " performance problems\n"
1062 " performance problems\n"
1063 "(use 'hg revert %s' to unadd the file)\n")
1063 "(use 'hg revert %s' to unadd the file)\n")
1064 % (f, f))
1064 % (f, f))
1065 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1065 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1066 self.ui.warn(_("%s not added: only files and symlinks "
1066 self.ui.warn(_("%s not added: only files and symlinks "
1067 "supported currently\n") % f)
1067 "supported currently\n") % f)
1068 rejected.append(p)
1068 rejected.append(p)
1069 elif self.dirstate[f] in 'amn':
1069 elif self.dirstate[f] in 'amn':
1070 self.ui.warn(_("%s already tracked!\n") % f)
1070 self.ui.warn(_("%s already tracked!\n") % f)
1071 elif self.dirstate[f] == 'r':
1071 elif self.dirstate[f] == 'r':
1072 self.dirstate.normallookup(f)
1072 self.dirstate.normallookup(f)
1073 else:
1073 else:
1074 self.dirstate.add(f)
1074 self.dirstate.add(f)
1075 return rejected
1075 return rejected
1076 finally:
1076 finally:
1077 wlock.release()
1077 wlock.release()
1078
1078
1079 def forget(self, list):
1079 def forget(self, list):
1080 wlock = self.wlock()
1080 wlock = self.wlock()
1081 try:
1081 try:
1082 for f in list:
1082 for f in list:
1083 if self.dirstate[f] != 'a':
1083 if self.dirstate[f] != 'a':
1084 self.ui.warn(_("%s not added!\n") % f)
1084 self.ui.warn(_("%s not added!\n") % f)
1085 else:
1085 else:
1086 self.dirstate.forget(f)
1086 self.dirstate.forget(f)
1087 finally:
1087 finally:
1088 wlock.release()
1088 wlock.release()
1089
1089
1090 def remove(self, list, unlink=False):
1090 def remove(self, list, unlink=False):
1091 if unlink:
1091 if unlink:
1092 for f in list:
1092 for f in list:
1093 try:
1093 try:
1094 util.unlink(self.wjoin(f))
1094 util.unlink(self.wjoin(f))
1095 except OSError, inst:
1095 except OSError, inst:
1096 if inst.errno != errno.ENOENT:
1096 if inst.errno != errno.ENOENT:
1097 raise
1097 raise
1098 wlock = self.wlock()
1098 wlock = self.wlock()
1099 try:
1099 try:
1100 for f in list:
1100 for f in list:
1101 if unlink and os.path.exists(self.wjoin(f)):
1101 if unlink and os.path.exists(self.wjoin(f)):
1102 self.ui.warn(_("%s still exists!\n") % f)
1102 self.ui.warn(_("%s still exists!\n") % f)
1103 elif self.dirstate[f] == 'a':
1103 elif self.dirstate[f] == 'a':
1104 self.dirstate.forget(f)
1104 self.dirstate.forget(f)
1105 elif f not in self.dirstate:
1105 elif f not in self.dirstate:
1106 self.ui.warn(_("%s not tracked!\n") % f)
1106 self.ui.warn(_("%s not tracked!\n") % f)
1107 else:
1107 else:
1108 self.dirstate.remove(f)
1108 self.dirstate.remove(f)
1109 finally:
1109 finally:
1110 wlock.release()
1110 wlock.release()
1111
1111
1112 def undelete(self, list):
1112 def undelete(self, list):
1113 manifests = [self.manifest.read(self.changelog.read(p)[0])
1113 manifests = [self.manifest.read(self.changelog.read(p)[0])
1114 for p in self.dirstate.parents() if p != nullid]
1114 for p in self.dirstate.parents() if p != nullid]
1115 wlock = self.wlock()
1115 wlock = self.wlock()
1116 try:
1116 try:
1117 for f in list:
1117 for f in list:
1118 if self.dirstate[f] != 'r':
1118 if self.dirstate[f] != 'r':
1119 self.ui.warn(_("%s not removed!\n") % f)
1119 self.ui.warn(_("%s not removed!\n") % f)
1120 else:
1120 else:
1121 m = f in manifests[0] and manifests[0] or manifests[1]
1121 m = f in manifests[0] and manifests[0] or manifests[1]
1122 t = self.file(f).read(m[f])
1122 t = self.file(f).read(m[f])
1123 self.wwrite(f, t, m.flags(f))
1123 self.wwrite(f, t, m.flags(f))
1124 self.dirstate.normal(f)
1124 self.dirstate.normal(f)
1125 finally:
1125 finally:
1126 wlock.release()
1126 wlock.release()
1127
1127
1128 def copy(self, source, dest):
1128 def copy(self, source, dest):
1129 p = self.wjoin(dest)
1129 p = self.wjoin(dest)
1130 if not (os.path.exists(p) or os.path.islink(p)):
1130 if not (os.path.exists(p) or os.path.islink(p)):
1131 self.ui.warn(_("%s does not exist!\n") % dest)
1131 self.ui.warn(_("%s does not exist!\n") % dest)
1132 elif not (os.path.isfile(p) or os.path.islink(p)):
1132 elif not (os.path.isfile(p) or os.path.islink(p)):
1133 self.ui.warn(_("copy failed: %s is not a file or a "
1133 self.ui.warn(_("copy failed: %s is not a file or a "
1134 "symbolic link\n") % dest)
1134 "symbolic link\n") % dest)
1135 else:
1135 else:
1136 wlock = self.wlock()
1136 wlock = self.wlock()
1137 try:
1137 try:
1138 if self.dirstate[dest] in '?r':
1138 if self.dirstate[dest] in '?r':
1139 self.dirstate.add(dest)
1139 self.dirstate.add(dest)
1140 self.dirstate.copy(source, dest)
1140 self.dirstate.copy(source, dest)
1141 finally:
1141 finally:
1142 wlock.release()
1142 wlock.release()
1143
1143
1144 def heads(self, start=None):
1144 def heads(self, start=None):
1145 heads = self.changelog.heads(start)
1145 heads = self.changelog.heads(start)
1146 # sort the output in rev descending order
1146 # sort the output in rev descending order
1147 heads = [(-self.changelog.rev(h), h) for h in heads]
1147 heads = [(-self.changelog.rev(h), h) for h in heads]
1148 return [n for (r, n) in sorted(heads)]
1148 return [n for (r, n) in sorted(heads)]
1149
1149
1150 def branchheads(self, branch=None, start=None, closed=False):
1150 def branchheads(self, branch=None, start=None, closed=False):
1151 '''return a (possibly filtered) list of heads for the given branch
1151 '''return a (possibly filtered) list of heads for the given branch
1152
1152
1153 Heads are returned in topological order, from newest to oldest.
1153 Heads are returned in topological order, from newest to oldest.
1154 If branch is None, use the dirstate branch.
1154 If branch is None, use the dirstate branch.
1155 If start is not None, return only heads reachable from start.
1155 If start is not None, return only heads reachable from start.
1156 If closed is True, return heads that are marked as closed as well.
1156 If closed is True, return heads that are marked as closed as well.
1157 '''
1157 '''
1158 if branch is None:
1158 if branch is None:
1159 branch = self[None].branch()
1159 branch = self[None].branch()
1160 branches = self.branchmap()
1160 branches = self.branchmap()
1161 if branch not in branches:
1161 if branch not in branches:
1162 return []
1162 return []
1163 # the cache returns heads ordered lowest to highest
1163 # the cache returns heads ordered lowest to highest
1164 bheads = list(reversed(branches[branch]))
1164 bheads = list(reversed(branches[branch]))
1165 if start is not None:
1165 if start is not None:
1166 # filter out the heads that cannot be reached from startrev
1166 # filter out the heads that cannot be reached from startrev
1167 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1167 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1168 bheads = [h for h in bheads if h in fbheads]
1168 bheads = [h for h in bheads if h in fbheads]
1169 if not closed:
1169 if not closed:
1170 bheads = [h for h in bheads if
1170 bheads = [h for h in bheads if
1171 ('close' not in self.changelog.read(h)[5])]
1171 ('close' not in self.changelog.read(h)[5])]
1172 return bheads
1172 return bheads
1173
1173
1174 def branches(self, nodes):
1174 def branches(self, nodes):
1175 if not nodes:
1175 if not nodes:
1176 nodes = [self.changelog.tip()]
1176 nodes = [self.changelog.tip()]
1177 b = []
1177 b = []
1178 for n in nodes:
1178 for n in nodes:
1179 t = n
1179 t = n
1180 while 1:
1180 while 1:
1181 p = self.changelog.parents(n)
1181 p = self.changelog.parents(n)
1182 if p[1] != nullid or p[0] == nullid:
1182 if p[1] != nullid or p[0] == nullid:
1183 b.append((t, n, p[0], p[1]))
1183 b.append((t, n, p[0], p[1]))
1184 break
1184 break
1185 n = p[0]
1185 n = p[0]
1186 return b
1186 return b
1187
1187
1188 def between(self, pairs):
1188 def between(self, pairs):
1189 r = []
1189 r = []
1190
1190
1191 for top, bottom in pairs:
1191 for top, bottom in pairs:
1192 n, l, i = top, [], 0
1192 n, l, i = top, [], 0
1193 f = 1
1193 f = 1
1194
1194
1195 while n != bottom and n != nullid:
1195 while n != bottom and n != nullid:
1196 p = self.changelog.parents(n)[0]
1196 p = self.changelog.parents(n)[0]
1197 if i == f:
1197 if i == f:
1198 l.append(n)
1198 l.append(n)
1199 f = f * 2
1199 f = f * 2
1200 n = p
1200 n = p
1201 i += 1
1201 i += 1
1202
1202
1203 r.append(l)
1203 r.append(l)
1204
1204
1205 return r
1205 return r
1206
1206
1207 def findincoming(self, remote, base=None, heads=None, force=False):
1207 def findincoming(self, remote, base=None, heads=None, force=False):
1208 """Return list of roots of the subsets of missing nodes from remote
1208 """Return list of roots of the subsets of missing nodes from remote
1209
1209
1210 If base dict is specified, assume that these nodes and their parents
1210 If base dict is specified, assume that these nodes and their parents
1211 exist on the remote side and that no child of a node of base exists
1211 exist on the remote side and that no child of a node of base exists
1212 in both remote and self.
1212 in both remote and self.
1213 Furthermore base will be updated to include the nodes that exists
1213 Furthermore base will be updated to include the nodes that exists
1214 in self and remote but no children exists in self and remote.
1214 in self and remote but no children exists in self and remote.
1215 If a list of heads is specified, return only nodes which are heads
1215 If a list of heads is specified, return only nodes which are heads
1216 or ancestors of these heads.
1216 or ancestors of these heads.
1217
1217
1218 All the ancestors of base are in self and in remote.
1218 All the ancestors of base are in self and in remote.
1219 All the descendants of the list returned are missing in self.
1219 All the descendants of the list returned are missing in self.
1220 (and so we know that the rest of the nodes are missing in remote, see
1220 (and so we know that the rest of the nodes are missing in remote, see
1221 outgoing)
1221 outgoing)
1222 """
1222 """
1223 return self.findcommonincoming(remote, base, heads, force)[1]
1223 return self.findcommonincoming(remote, base, heads, force)[1]
1224
1224
1225 def findcommonincoming(self, remote, base=None, heads=None, force=False):
1225 def findcommonincoming(self, remote, base=None, heads=None, force=False):
1226 """Return a tuple (common, missing roots, heads) used to identify
1226 """Return a tuple (common, missing roots, heads) used to identify
1227 missing nodes from remote.
1227 missing nodes from remote.
1228
1228
1229 If base dict is specified, assume that these nodes and their parents
1229 If base dict is specified, assume that these nodes and their parents
1230 exist on the remote side and that no child of a node of base exists
1230 exist on the remote side and that no child of a node of base exists
1231 in both remote and self.
1231 in both remote and self.
1232 Furthermore base will be updated to include the nodes that exists
1232 Furthermore base will be updated to include the nodes that exists
1233 in self and remote but no children exists in self and remote.
1233 in self and remote but no children exists in self and remote.
1234 If a list of heads is specified, return only nodes which are heads
1234 If a list of heads is specified, return only nodes which are heads
1235 or ancestors of these heads.
1235 or ancestors of these heads.
1236
1236
1237 All the ancestors of base are in self and in remote.
1237 All the ancestors of base are in self and in remote.
1238 """
1238 """
1239 m = self.changelog.nodemap
1239 m = self.changelog.nodemap
1240 search = []
1240 search = []
1241 fetch = set()
1241 fetch = set()
1242 seen = set()
1242 seen = set()
1243 seenbranch = set()
1243 seenbranch = set()
1244 if base is None:
1244 if base is None:
1245 base = {}
1245 base = {}
1246
1246
1247 if not heads:
1247 if not heads:
1248 heads = remote.heads()
1248 heads = remote.heads()
1249
1249
1250 if self.changelog.tip() == nullid:
1250 if self.changelog.tip() == nullid:
1251 base[nullid] = 1
1251 base[nullid] = 1
1252 if heads != [nullid]:
1252 if heads != [nullid]:
1253 return [nullid], [nullid], list(heads)
1253 return [nullid], [nullid], list(heads)
1254 return [nullid], [], []
1254 return [nullid], [], []
1255
1255
1256 # assume we're closer to the tip than the root
1256 # assume we're closer to the tip than the root
1257 # and start by examining the heads
1257 # and start by examining the heads
1258 self.ui.status(_("searching for changes\n"))
1258 self.ui.status(_("searching for changes\n"))
1259
1259
1260 unknown = []
1260 unknown = []
1261 for h in heads:
1261 for h in heads:
1262 if h not in m:
1262 if h not in m:
1263 unknown.append(h)
1263 unknown.append(h)
1264 else:
1264 else:
1265 base[h] = 1
1265 base[h] = 1
1266
1266
1267 heads = unknown
1267 heads = unknown
1268 if not unknown:
1268 if not unknown:
1269 return base.keys(), [], []
1269 return base.keys(), [], []
1270
1270
1271 req = set(unknown)
1271 req = set(unknown)
1272 reqcnt = 0
1272 reqcnt = 0
1273
1273
1274 # search through remote branches
1274 # search through remote branches
1275 # a 'branch' here is a linear segment of history, with four parts:
1275 # a 'branch' here is a linear segment of history, with four parts:
1276 # head, root, first parent, second parent
1276 # head, root, first parent, second parent
1277 # (a branch always has two parents (or none) by definition)
1277 # (a branch always has two parents (or none) by definition)
1278 unknown = remote.branches(unknown)
1278 unknown = remote.branches(unknown)
1279 while unknown:
1279 while unknown:
1280 r = []
1280 r = []
1281 while unknown:
1281 while unknown:
1282 n = unknown.pop(0)
1282 n = unknown.pop(0)
1283 if n[0] in seen:
1283 if n[0] in seen:
1284 continue
1284 continue
1285
1285
1286 self.ui.debug("examining %s:%s\n"
1286 self.ui.debug("examining %s:%s\n"
1287 % (short(n[0]), short(n[1])))
1287 % (short(n[0]), short(n[1])))
1288 if n[0] == nullid: # found the end of the branch
1288 if n[0] == nullid: # found the end of the branch
1289 pass
1289 pass
1290 elif n in seenbranch:
1290 elif n in seenbranch:
1291 self.ui.debug("branch already found\n")
1291 self.ui.debug("branch already found\n")
1292 continue
1292 continue
1293 elif n[1] and n[1] in m: # do we know the base?
1293 elif n[1] and n[1] in m: # do we know the base?
1294 self.ui.debug("found incomplete branch %s:%s\n"
1294 self.ui.debug("found incomplete branch %s:%s\n"
1295 % (short(n[0]), short(n[1])))
1295 % (short(n[0]), short(n[1])))
1296 search.append(n[0:2]) # schedule branch range for scanning
1296 search.append(n[0:2]) # schedule branch range for scanning
1297 seenbranch.add(n)
1297 seenbranch.add(n)
1298 else:
1298 else:
1299 if n[1] not in seen and n[1] not in fetch:
1299 if n[1] not in seen and n[1] not in fetch:
1300 if n[2] in m and n[3] in m:
1300 if n[2] in m and n[3] in m:
1301 self.ui.debug("found new changeset %s\n" %
1301 self.ui.debug("found new changeset %s\n" %
1302 short(n[1]))
1302 short(n[1]))
1303 fetch.add(n[1]) # earliest unknown
1303 fetch.add(n[1]) # earliest unknown
1304 for p in n[2:4]:
1304 for p in n[2:4]:
1305 if p in m:
1305 if p in m:
1306 base[p] = 1 # latest known
1306 base[p] = 1 # latest known
1307
1307
1308 for p in n[2:4]:
1308 for p in n[2:4]:
1309 if p not in req and p not in m:
1309 if p not in req and p not in m:
1310 r.append(p)
1310 r.append(p)
1311 req.add(p)
1311 req.add(p)
1312 seen.add(n[0])
1312 seen.add(n[0])
1313
1313
1314 if r:
1314 if r:
1315 reqcnt += 1
1315 reqcnt += 1
1316 self.ui.debug("request %d: %s\n" %
1316 self.ui.debug("request %d: %s\n" %
1317 (reqcnt, " ".join(map(short, r))))
1317 (reqcnt, " ".join(map(short, r))))
1318 for p in xrange(0, len(r), 10):
1318 for p in xrange(0, len(r), 10):
1319 for b in remote.branches(r[p:p+10]):
1319 for b in remote.branches(r[p:p+10]):
1320 self.ui.debug("received %s:%s\n" %
1320 self.ui.debug("received %s:%s\n" %
1321 (short(b[0]), short(b[1])))
1321 (short(b[0]), short(b[1])))
1322 unknown.append(b)
1322 unknown.append(b)
1323
1323
1324 # do binary search on the branches we found
1324 # do binary search on the branches we found
1325 while search:
1325 while search:
1326 newsearch = []
1326 newsearch = []
1327 reqcnt += 1
1327 reqcnt += 1
1328 for n, l in zip(search, remote.between(search)):
1328 for n, l in zip(search, remote.between(search)):
1329 l.append(n[1])
1329 l.append(n[1])
1330 p = n[0]
1330 p = n[0]
1331 f = 1
1331 f = 1
1332 for i in l:
1332 for i in l:
1333 self.ui.debug("narrowing %d:%d %s\n" % (f, len(l), short(i)))
1333 self.ui.debug("narrowing %d:%d %s\n" % (f, len(l), short(i)))
1334 if i in m:
1334 if i in m:
1335 if f <= 2:
1335 if f <= 2:
1336 self.ui.debug("found new branch changeset %s\n" %
1336 self.ui.debug("found new branch changeset %s\n" %
1337 short(p))
1337 short(p))
1338 fetch.add(p)
1338 fetch.add(p)
1339 base[i] = 1
1339 base[i] = 1
1340 else:
1340 else:
1341 self.ui.debug("narrowed branch search to %s:%s\n"
1341 self.ui.debug("narrowed branch search to %s:%s\n"
1342 % (short(p), short(i)))
1342 % (short(p), short(i)))
1343 newsearch.append((p, i))
1343 newsearch.append((p, i))
1344 break
1344 break
1345 p, f = i, f * 2
1345 p, f = i, f * 2
1346 search = newsearch
1346 search = newsearch
1347
1347
1348 # sanity check our fetch list
1348 # sanity check our fetch list
1349 for f in fetch:
1349 for f in fetch:
1350 if f in m:
1350 if f in m:
1351 raise error.RepoError(_("already have changeset ")
1351 raise error.RepoError(_("already have changeset ")
1352 + short(f[:4]))
1352 + short(f[:4]))
1353
1353
1354 if base.keys() == [nullid]:
1354 if base.keys() == [nullid]:
1355 if force:
1355 if force:
1356 self.ui.warn(_("warning: repository is unrelated\n"))
1356 self.ui.warn(_("warning: repository is unrelated\n"))
1357 else:
1357 else:
1358 raise util.Abort(_("repository is unrelated"))
1358 raise util.Abort(_("repository is unrelated"))
1359
1359
1360 self.ui.debug("found new changesets starting at " +
1360 self.ui.debug("found new changesets starting at " +
1361 " ".join([short(f) for f in fetch]) + "\n")
1361 " ".join([short(f) for f in fetch]) + "\n")
1362
1362
1363 self.ui.debug("%d total queries\n" % reqcnt)
1363 self.ui.debug("%d total queries\n" % reqcnt)
1364
1364
1365 return base.keys(), list(fetch), heads
1365 return base.keys(), list(fetch), heads
1366
1366
1367 def findoutgoing(self, remote, base=None, heads=None, force=False):
1367 def findoutgoing(self, remote, base=None, heads=None, force=False):
1368 """Return list of nodes that are roots of subsets not in remote
1368 """Return list of nodes that are roots of subsets not in remote
1369
1369
1370 If base dict is specified, assume that these nodes and their parents
1370 If base dict is specified, assume that these nodes and their parents
1371 exist on the remote side.
1371 exist on the remote side.
1372 If a list of heads is specified, return only nodes which are heads
1372 If a list of heads is specified, return only nodes which are heads
1373 or ancestors of these heads, and return a second element which
1373 or ancestors of these heads, and return a second element which
1374 contains all remote heads which get new children.
1374 contains all remote heads which get new children.
1375 """
1375 """
1376 if base is None:
1376 if base is None:
1377 base = {}
1377 base = {}
1378 self.findincoming(remote, base, heads, force=force)
1378 self.findincoming(remote, base, heads, force=force)
1379
1379
1380 self.ui.debug("common changesets up to "
1380 self.ui.debug("common changesets up to "
1381 + " ".join(map(short, base.keys())) + "\n")
1381 + " ".join(map(short, base.keys())) + "\n")
1382
1382
1383 remain = set(self.changelog.nodemap)
1383 remain = set(self.changelog.nodemap)
1384
1384
1385 # prune everything remote has from the tree
1385 # prune everything remote has from the tree
1386 remain.remove(nullid)
1386 remain.remove(nullid)
1387 remove = base.keys()
1387 remove = base.keys()
1388 while remove:
1388 while remove:
1389 n = remove.pop(0)
1389 n = remove.pop(0)
1390 if n in remain:
1390 if n in remain:
1391 remain.remove(n)
1391 remain.remove(n)
1392 for p in self.changelog.parents(n):
1392 for p in self.changelog.parents(n):
1393 remove.append(p)
1393 remove.append(p)
1394
1394
1395 # find every node whose parents have been pruned
1395 # find every node whose parents have been pruned
1396 subset = []
1396 subset = []
1397 # find every remote head that will get new children
1397 # find every remote head that will get new children
1398 updated_heads = set()
1398 updated_heads = set()
1399 for n in remain:
1399 for n in remain:
1400 p1, p2 = self.changelog.parents(n)
1400 p1, p2 = self.changelog.parents(n)
1401 if p1 not in remain and p2 not in remain:
1401 if p1 not in remain and p2 not in remain:
1402 subset.append(n)
1402 subset.append(n)
1403 if heads:
1403 if heads:
1404 if p1 in heads:
1404 if p1 in heads:
1405 updated_heads.add(p1)
1405 updated_heads.add(p1)
1406 if p2 in heads:
1406 if p2 in heads:
1407 updated_heads.add(p2)
1407 updated_heads.add(p2)
1408
1408
1409 # this is the set of all roots we have to push
1409 # this is the set of all roots we have to push
1410 if heads:
1410 if heads:
1411 return subset, list(updated_heads)
1411 return subset, list(updated_heads)
1412 else:
1412 else:
1413 return subset
1413 return subset
1414
1414
1415 def pull(self, remote, heads=None, force=False):
1415 def pull(self, remote, heads=None, force=False):
1416 lock = self.lock()
1416 lock = self.lock()
1417 try:
1417 try:
1418 common, fetch, rheads = self.findcommonincoming(remote, heads=heads,
1418 common, fetch, rheads = self.findcommonincoming(remote, heads=heads,
1419 force=force)
1419 force=force)
1420 if fetch == [nullid]:
1420 if fetch == [nullid]:
1421 self.ui.status(_("requesting all changes\n"))
1421 self.ui.status(_("requesting all changes\n"))
1422
1422
1423 if not fetch:
1423 if not fetch:
1424 self.ui.status(_("no changes found\n"))
1424 self.ui.status(_("no changes found\n"))
1425 return 0
1425 return 0
1426
1426
1427 if heads is None and remote.capable('changegroupsubset'):
1427 if heads is None and remote.capable('changegroupsubset'):
1428 heads = rheads
1428 heads = rheads
1429
1429
1430 if heads is None:
1430 if heads is None:
1431 cg = remote.changegroup(fetch, 'pull')
1431 cg = remote.changegroup(fetch, 'pull')
1432 else:
1432 else:
1433 if not remote.capable('changegroupsubset'):
1433 if not remote.capable('changegroupsubset'):
1434 raise util.Abort(_("Partial pull cannot be done because "
1434 raise util.Abort(_("Partial pull cannot be done because "
1435 "other repository doesn't support "
1435 "other repository doesn't support "
1436 "changegroupsubset."))
1436 "changegroupsubset."))
1437 cg = remote.changegroupsubset(fetch, heads, 'pull')
1437 cg = remote.changegroupsubset(fetch, heads, 'pull')
1438 return self.addchangegroup(cg, 'pull', remote.url())
1438 return self.addchangegroup(cg, 'pull', remote.url())
1439 finally:
1439 finally:
1440 lock.release()
1440 lock.release()
1441
1441
1442 def push(self, remote, force=False, revs=None):
1442 def push(self, remote, force=False, revs=None):
1443 # there are two ways to push to remote repo:
1443 # there are two ways to push to remote repo:
1444 #
1444 #
1445 # addchangegroup assumes local user can lock remote
1445 # addchangegroup assumes local user can lock remote
1446 # repo (local filesystem, old ssh servers).
1446 # repo (local filesystem, old ssh servers).
1447 #
1447 #
1448 # unbundle assumes local user cannot lock remote repo (new ssh
1448 # unbundle assumes local user cannot lock remote repo (new ssh
1449 # servers, http servers).
1449 # servers, http servers).
1450
1450
1451 if remote.capable('unbundle'):
1451 if remote.capable('unbundle'):
1452 return self.push_unbundle(remote, force, revs)
1452 return self.push_unbundle(remote, force, revs)
1453 return self.push_addchangegroup(remote, force, revs)
1453 return self.push_addchangegroup(remote, force, revs)
1454
1454
1455 def prepush(self, remote, force, revs):
1455 def prepush(self, remote, force, revs):
1456 '''Analyze the local and remote repositories and determine which
1456 '''Analyze the local and remote repositories and determine which
1457 changesets need to be pushed to the remote. Return a tuple
1457 changesets need to be pushed to the remote. Return a tuple
1458 (changegroup, remoteheads). changegroup is a readable file-like
1458 (changegroup, remoteheads). changegroup is a readable file-like
1459 object whose read() returns successive changegroup chunks ready to
1459 object whose read() returns successive changegroup chunks ready to
1460 be sent over the wire. remoteheads is the list of remote heads.
1460 be sent over the wire. remoteheads is the list of remote heads.
1461 '''
1461 '''
1462 common = {}
1462 common = {}
1463 remote_heads = remote.heads()
1463 remote_heads = remote.heads()
1464 inc = self.findincoming(remote, common, remote_heads, force=force)
1464 inc = self.findincoming(remote, common, remote_heads, force=force)
1465
1465
1466 update, updated_heads = self.findoutgoing(remote, common, remote_heads)
1466 update, updated_heads = self.findoutgoing(remote, common, remote_heads)
1467 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1467 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1468
1468
1469 def checkbranch(lheads, rheads, updatelb):
1469 def checkbranch(lheads, rheads, updatelb):
1470 '''
1470 '''
1471 check whether there are more local heads than remote heads on
1471 check whether there are more local heads than remote heads on
1472 a specific branch.
1472 a specific branch.
1473
1473
1474 lheads: local branch heads
1474 lheads: local branch heads
1475 rheads: remote branch heads
1475 rheads: remote branch heads
1476 updatelb: outgoing local branch bases
1476 updatelb: outgoing local branch bases
1477 '''
1477 '''
1478
1478
1479 warn = 0
1479 warn = 0
1480
1480
1481 if not revs and len(lheads) > len(rheads):
1481 if not revs and len(lheads) > len(rheads):
1482 warn = 1
1482 warn = 1
1483 else:
1483 else:
1484 # add local heads involved in the push
1484 # add local heads involved in the push
1485 updatelheads = [self.changelog.heads(x, lheads)
1485 updatelheads = [self.changelog.heads(x, lheads)
1486 for x in updatelb]
1486 for x in updatelb]
1487 newheads = set(sum(updatelheads, [])) & set(lheads)
1487 newheads = set(sum(updatelheads, [])) & set(lheads)
1488
1488
1489 if not newheads:
1489 if not newheads:
1490 return True
1490 return True
1491
1491
1492 # add heads we don't have or that are not involved in the push
1492 # add heads we don't have or that are not involved in the push
1493 for r in rheads:
1493 for r in rheads:
1494 if r in self.changelog.nodemap:
1494 if r in self.changelog.nodemap:
1495 desc = self.changelog.heads(r, heads)
1495 desc = self.changelog.heads(r, heads)
1496 l = [h for h in heads if h in desc]
1496 l = [h for h in heads if h in desc]
1497 if not l:
1497 if not l:
1498 newheads.add(r)
1498 newheads.add(r)
1499 else:
1499 else:
1500 newheads.add(r)
1500 newheads.add(r)
1501 if len(newheads) > len(rheads):
1501 if len(newheads) > len(rheads):
1502 warn = 1
1502 warn = 1
1503
1503
1504 if warn:
1504 if warn:
1505 if not rheads: # new branch requires --force
1505 if not rheads: # new branch requires --force
1506 self.ui.warn(_("abort: push creates new"
1506 self.ui.warn(_("abort: push creates new"
1507 " remote branch '%s'!\n") %
1507 " remote branch '%s'!\n") %
1508 self[updatelb[0]].branch())
1508 self[updatelb[0]].branch())
1509 else:
1509 else:
1510 self.ui.warn(_("abort: push creates new remote heads!\n"))
1510 self.ui.warn(_("abort: push creates new remote heads!\n"))
1511
1511
1512 self.ui.status(_("(did you forget to merge?"
1512 self.ui.status(_("(did you forget to merge?"
1513 " use push -f to force)\n"))
1513 " use push -f to force)\n"))
1514 return False
1514 return False
1515 return True
1515 return True
1516
1516
1517 if not bases:
1517 if not bases:
1518 self.ui.status(_("no changes found\n"))
1518 self.ui.status(_("no changes found\n"))
1519 return None, 1
1519 return None, 1
1520 elif not force:
1520 elif not force:
1521 # Check for each named branch if we're creating new remote heads.
1521 # Check for each named branch if we're creating new remote heads.
1522 # To be a remote head after push, node must be either:
1522 # To be a remote head after push, node must be either:
1523 # - unknown locally
1523 # - unknown locally
1524 # - a local outgoing head descended from update
1524 # - a local outgoing head descended from update
1525 # - a remote head that's known locally and not
1525 # - a remote head that's known locally and not
1526 # ancestral to an outgoing head
1526 # ancestral to an outgoing head
1527 #
1527 #
1528 # New named branches cannot be created without --force.
1528 # New named branches cannot be created without --force.
1529
1529
1530 if remote_heads != [nullid]:
1530 if remote_heads != [nullid]:
1531 if remote.capable('branchmap'):
1531 if remote.capable('branchmap'):
1532 localhds = {}
1532 localhds = {}
1533 if not revs:
1533 if not revs:
1534 localhds = self.branchmap()
1534 localhds = self.branchmap()
1535 else:
1535 else:
1536 for n in heads:
1536 for n in heads:
1537 branch = self[n].branch()
1537 branch = self[n].branch()
1538 if branch in localhds:
1538 if branch in localhds:
1539 localhds[branch].append(n)
1539 localhds[branch].append(n)
1540 else:
1540 else:
1541 localhds[branch] = [n]
1541 localhds[branch] = [n]
1542
1542
1543 remotehds = remote.branchmap()
1543 remotehds = remote.branchmap()
1544
1544
1545 for lh in localhds:
1545 for lh in localhds:
1546 if lh in remotehds:
1546 if lh in remotehds:
1547 rheads = remotehds[lh]
1547 rheads = remotehds[lh]
1548 else:
1548 else:
1549 rheads = []
1549 rheads = []
1550 lheads = localhds[lh]
1550 lheads = localhds[lh]
1551 updatelb = [upd for upd in update
1551 updatelb = [upd for upd in update
1552 if self[upd].branch() == lh]
1552 if self[upd].branch() == lh]
1553 if not updatelb:
1553 if not updatelb:
1554 continue
1554 continue
1555 if not checkbranch(lheads, rheads, updatelb):
1555 if not checkbranch(lheads, rheads, updatelb):
1556 return None, 0
1556 return None, 0
1557 else:
1557 else:
1558 if not checkbranch(heads, remote_heads, update):
1558 if not checkbranch(heads, remote_heads, update):
1559 return None, 0
1559 return None, 0
1560
1560
1561 if inc:
1561 if inc:
1562 self.ui.warn(_("note: unsynced remote changes!\n"))
1562 self.ui.warn(_("note: unsynced remote changes!\n"))
1563
1563
1564
1564
1565 if revs is None:
1565 if revs is None:
1566 # use the fast path, no race possible on push
1566 # use the fast path, no race possible on push
1567 cg = self._changegroup(common.keys(), 'push')
1567 nodes = self.changelog.findmissing(common.keys())
1568 cg = self._changegroup(nodes, 'push')
1568 else:
1569 else:
1569 cg = self.changegroupsubset(update, revs, 'push')
1570 cg = self.changegroupsubset(update, revs, 'push')
1570 return cg, remote_heads
1571 return cg, remote_heads
1571
1572
1572 def push_addchangegroup(self, remote, force, revs):
1573 def push_addchangegroup(self, remote, force, revs):
1573 lock = remote.lock()
1574 lock = remote.lock()
1574 try:
1575 try:
1575 ret = self.prepush(remote, force, revs)
1576 ret = self.prepush(remote, force, revs)
1576 if ret[0] is not None:
1577 if ret[0] is not None:
1577 cg, remote_heads = ret
1578 cg, remote_heads = ret
1578 return remote.addchangegroup(cg, 'push', self.url())
1579 return remote.addchangegroup(cg, 'push', self.url())
1579 return ret[1]
1580 return ret[1]
1580 finally:
1581 finally:
1581 lock.release()
1582 lock.release()
1582
1583
1583 def push_unbundle(self, remote, force, revs):
1584 def push_unbundle(self, remote, force, revs):
1584 # local repo finds heads on server, finds out what revs it
1585 # local repo finds heads on server, finds out what revs it
1585 # must push. once revs transferred, if server finds it has
1586 # must push. once revs transferred, if server finds it has
1586 # different heads (someone else won commit/push race), server
1587 # different heads (someone else won commit/push race), server
1587 # aborts.
1588 # aborts.
1588
1589
1589 ret = self.prepush(remote, force, revs)
1590 ret = self.prepush(remote, force, revs)
1590 if ret[0] is not None:
1591 if ret[0] is not None:
1591 cg, remote_heads = ret
1592 cg, remote_heads = ret
1592 if force: remote_heads = ['force']
1593 if force: remote_heads = ['force']
1593 return remote.unbundle(cg, remote_heads, 'push')
1594 return remote.unbundle(cg, remote_heads, 'push')
1594 return ret[1]
1595 return ret[1]
1595
1596
1596 def changegroupinfo(self, nodes, source):
1597 def changegroupinfo(self, nodes, source):
1597 if self.ui.verbose or source == 'bundle':
1598 if self.ui.verbose or source == 'bundle':
1598 self.ui.status(_("%d changesets found\n") % len(nodes))
1599 self.ui.status(_("%d changesets found\n") % len(nodes))
1599 if self.ui.debugflag:
1600 if self.ui.debugflag:
1600 self.ui.debug("list of changesets:\n")
1601 self.ui.debug("list of changesets:\n")
1601 for node in nodes:
1602 for node in nodes:
1602 self.ui.debug("%s\n" % hex(node))
1603 self.ui.debug("%s\n" % hex(node))
1603
1604
1604 def changegroupsubset(self, bases, heads, source, extranodes=None):
1605 def changegroupsubset(self, bases, heads, source, extranodes=None):
1605 """Compute a changegroup consisting of all the nodes that are
1606 """Compute a changegroup consisting of all the nodes that are
1606 descendents of any of the bases and ancestors of any of the heads.
1607 descendents of any of the bases and ancestors of any of the heads.
1607 Return a chunkbuffer object whose read() method will return
1608 Return a chunkbuffer object whose read() method will return
1608 successive changegroup chunks.
1609 successive changegroup chunks.
1609
1610
1610 It is fairly complex as determining which filenodes and which
1611 It is fairly complex as determining which filenodes and which
1611 manifest nodes need to be included for the changeset to be complete
1612 manifest nodes need to be included for the changeset to be complete
1612 is non-trivial.
1613 is non-trivial.
1613
1614
1614 Another wrinkle is doing the reverse, figuring out which changeset in
1615 Another wrinkle is doing the reverse, figuring out which changeset in
1615 the changegroup a particular filenode or manifestnode belongs to.
1616 the changegroup a particular filenode or manifestnode belongs to.
1616
1617
1617 The caller can specify some nodes that must be included in the
1618 The caller can specify some nodes that must be included in the
1618 changegroup using the extranodes argument. It should be a dict
1619 changegroup using the extranodes argument. It should be a dict
1619 where the keys are the filenames (or 1 for the manifest), and the
1620 where the keys are the filenames (or 1 for the manifest), and the
1620 values are lists of (node, linknode) tuples, where node is a wanted
1621 values are lists of (node, linknode) tuples, where node is a wanted
1621 node and linknode is the changelog node that should be transmitted as
1622 node and linknode is the changelog node that should be transmitted as
1622 the linkrev.
1623 the linkrev.
1623 """
1624 """
1624
1625
1626 # Set up some initial variables
1627 # Make it easy to refer to self.changelog
1628 cl = self.changelog
1629 # msng is short for missing - compute the list of changesets in this
1630 # changegroup.
1631 if not bases:
1632 bases = [nullid]
1633 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1634
1625 if extranodes is None:
1635 if extranodes is None:
1626 # can we go through the fast path ?
1636 # can we go through the fast path ?
1627 heads.sort()
1637 heads.sort()
1628 allheads = self.heads()
1638 allheads = self.heads()
1629 allheads.sort()
1639 allheads.sort()
1630 if heads == allheads:
1640 if heads == allheads:
1631 common = []
1641 return self._changegroup(msng_cl_lst, source)
1632 # parents of bases are known from both sides
1633 for n in bases:
1634 for p in self.changelog.parents(n):
1635 if p != nullid:
1636 common.append(p)
1637 return self._changegroup(common, source)
1638
1642
1643 # slow path
1639 self.hook('preoutgoing', throw=True, source=source)
1644 self.hook('preoutgoing', throw=True, source=source)
1640
1645
1641 # Set up some initial variables
1642 # Make it easy to refer to self.changelog
1643 cl = self.changelog
1644 # msng is short for missing - compute the list of changesets in this
1645 # changegroup.
1646 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1647 self.changegroupinfo(msng_cl_lst, source)
1646 self.changegroupinfo(msng_cl_lst, source)
1648 # Some bases may turn out to be superfluous, and some heads may be
1647 # Some bases may turn out to be superfluous, and some heads may be
1649 # too. nodesbetween will return the minimal set of bases and heads
1648 # too. nodesbetween will return the minimal set of bases and heads
1650 # necessary to re-create the changegroup.
1649 # necessary to re-create the changegroup.
1651
1650
1652 # Known heads are the list of heads that it is assumed the recipient
1651 # Known heads are the list of heads that it is assumed the recipient
1653 # of this changegroup will know about.
1652 # of this changegroup will know about.
1654 knownheads = set()
1653 knownheads = set()
1655 # We assume that all parents of bases are known heads.
1654 # We assume that all parents of bases are known heads.
1656 for n in bases:
1655 for n in bases:
1657 knownheads.update(cl.parents(n))
1656 knownheads.update(cl.parents(n))
1658 knownheads.discard(nullid)
1657 knownheads.discard(nullid)
1659 knownheads = list(knownheads)
1658 knownheads = list(knownheads)
1660 if knownheads:
1659 if knownheads:
1661 # Now that we know what heads are known, we can compute which
1660 # Now that we know what heads are known, we can compute which
1662 # changesets are known. The recipient must know about all
1661 # changesets are known. The recipient must know about all
1663 # changesets required to reach the known heads from the null
1662 # changesets required to reach the known heads from the null
1664 # changeset.
1663 # changeset.
1665 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1664 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1666 junk = None
1665 junk = None
1667 # Transform the list into a set.
1666 # Transform the list into a set.
1668 has_cl_set = set(has_cl_set)
1667 has_cl_set = set(has_cl_set)
1669 else:
1668 else:
1670 # If there were no known heads, the recipient cannot be assumed to
1669 # If there were no known heads, the recipient cannot be assumed to
1671 # know about any changesets.
1670 # know about any changesets.
1672 has_cl_set = set()
1671 has_cl_set = set()
1673
1672
1674 # Make it easy to refer to self.manifest
1673 # Make it easy to refer to self.manifest
1675 mnfst = self.manifest
1674 mnfst = self.manifest
1676 # We don't know which manifests are missing yet
1675 # We don't know which manifests are missing yet
1677 msng_mnfst_set = {}
1676 msng_mnfst_set = {}
1678 # Nor do we know which filenodes are missing.
1677 # Nor do we know which filenodes are missing.
1679 msng_filenode_set = {}
1678 msng_filenode_set = {}
1680
1679
1681 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1680 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1682 junk = None
1681 junk = None
1683
1682
1684 # A changeset always belongs to itself, so the changenode lookup
1683 # A changeset always belongs to itself, so the changenode lookup
1685 # function for a changenode is identity.
1684 # function for a changenode is identity.
1686 def identity(x):
1685 def identity(x):
1687 return x
1686 return x
1688
1687
1689 # If we determine that a particular file or manifest node must be a
1688 # If we determine that a particular file or manifest node must be a
1690 # node that the recipient of the changegroup will already have, we can
1689 # node that the recipient of the changegroup will already have, we can
1691 # also assume the recipient will have all the parents. This function
1690 # also assume the recipient will have all the parents. This function
1692 # prunes them from the set of missing nodes.
1691 # prunes them from the set of missing nodes.
1693 def prune_parents(revlog, hasset, msngset):
1692 def prune_parents(revlog, hasset, msngset):
1694 haslst = list(hasset)
1693 haslst = list(hasset)
1695 haslst.sort(key=revlog.rev)
1694 haslst.sort(key=revlog.rev)
1696 for node in haslst:
1695 for node in haslst:
1697 parentlst = [p for p in revlog.parents(node) if p != nullid]
1696 parentlst = [p for p in revlog.parents(node) if p != nullid]
1698 while parentlst:
1697 while parentlst:
1699 n = parentlst.pop()
1698 n = parentlst.pop()
1700 if n not in hasset:
1699 if n not in hasset:
1701 hasset.add(n)
1700 hasset.add(n)
1702 p = [p for p in revlog.parents(n) if p != nullid]
1701 p = [p for p in revlog.parents(n) if p != nullid]
1703 parentlst.extend(p)
1702 parentlst.extend(p)
1704 for n in hasset:
1703 for n in hasset:
1705 msngset.pop(n, None)
1704 msngset.pop(n, None)
1706
1705
1707 # This is a function generating function used to set up an environment
1706 # This is a function generating function used to set up an environment
1708 # for the inner function to execute in.
1707 # for the inner function to execute in.
1709 def manifest_and_file_collector(changedfileset):
1708 def manifest_and_file_collector(changedfileset):
1710 # This is an information gathering function that gathers
1709 # This is an information gathering function that gathers
1711 # information from each changeset node that goes out as part of
1710 # information from each changeset node that goes out as part of
1712 # the changegroup. The information gathered is a list of which
1711 # the changegroup. The information gathered is a list of which
1713 # manifest nodes are potentially required (the recipient may
1712 # manifest nodes are potentially required (the recipient may
1714 # already have them) and total list of all files which were
1713 # already have them) and total list of all files which were
1715 # changed in any changeset in the changegroup.
1714 # changed in any changeset in the changegroup.
1716 #
1715 #
1717 # We also remember the first changenode we saw any manifest
1716 # We also remember the first changenode we saw any manifest
1718 # referenced by so we can later determine which changenode 'owns'
1717 # referenced by so we can later determine which changenode 'owns'
1719 # the manifest.
1718 # the manifest.
1720 def collect_manifests_and_files(clnode):
1719 def collect_manifests_and_files(clnode):
1721 c = cl.read(clnode)
1720 c = cl.read(clnode)
1722 for f in c[3]:
1721 for f in c[3]:
1723 # This is to make sure we only have one instance of each
1722 # This is to make sure we only have one instance of each
1724 # filename string for each filename.
1723 # filename string for each filename.
1725 changedfileset.setdefault(f, f)
1724 changedfileset.setdefault(f, f)
1726 msng_mnfst_set.setdefault(c[0], clnode)
1725 msng_mnfst_set.setdefault(c[0], clnode)
1727 return collect_manifests_and_files
1726 return collect_manifests_and_files
1728
1727
1729 # Figure out which manifest nodes (of the ones we think might be part
1728 # Figure out which manifest nodes (of the ones we think might be part
1730 # of the changegroup) the recipient must know about and remove them
1729 # of the changegroup) the recipient must know about and remove them
1731 # from the changegroup.
1730 # from the changegroup.
1732 def prune_manifests():
1731 def prune_manifests():
1733 has_mnfst_set = set()
1732 has_mnfst_set = set()
1734 for n in msng_mnfst_set:
1733 for n in msng_mnfst_set:
1735 # If a 'missing' manifest thinks it belongs to a changenode
1734 # If a 'missing' manifest thinks it belongs to a changenode
1736 # the recipient is assumed to have, obviously the recipient
1735 # the recipient is assumed to have, obviously the recipient
1737 # must have that manifest.
1736 # must have that manifest.
1738 linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
1737 linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
1739 if linknode in has_cl_set:
1738 if linknode in has_cl_set:
1740 has_mnfst_set.add(n)
1739 has_mnfst_set.add(n)
1741 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1740 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1742
1741
1743 # Use the information collected in collect_manifests_and_files to say
1742 # Use the information collected in collect_manifests_and_files to say
1744 # which changenode any manifestnode belongs to.
1743 # which changenode any manifestnode belongs to.
1745 def lookup_manifest_link(mnfstnode):
1744 def lookup_manifest_link(mnfstnode):
1746 return msng_mnfst_set[mnfstnode]
1745 return msng_mnfst_set[mnfstnode]
1747
1746
1748 # A function generating function that sets up the initial environment
1747 # A function generating function that sets up the initial environment
1749 # the inner function.
1748 # the inner function.
1750 def filenode_collector(changedfiles):
1749 def filenode_collector(changedfiles):
1751 next_rev = [0]
1750 next_rev = [0]
1752 # This gathers information from each manifestnode included in the
1751 # This gathers information from each manifestnode included in the
1753 # changegroup about which filenodes the manifest node references
1752 # changegroup about which filenodes the manifest node references
1754 # so we can include those in the changegroup too.
1753 # so we can include those in the changegroup too.
1755 #
1754 #
1756 # It also remembers which changenode each filenode belongs to. It
1755 # It also remembers which changenode each filenode belongs to. It
1757 # does this by assuming the a filenode belongs to the changenode
1756 # does this by assuming the a filenode belongs to the changenode
1758 # the first manifest that references it belongs to.
1757 # the first manifest that references it belongs to.
1759 def collect_msng_filenodes(mnfstnode):
1758 def collect_msng_filenodes(mnfstnode):
1760 r = mnfst.rev(mnfstnode)
1759 r = mnfst.rev(mnfstnode)
1761 if r == next_rev[0]:
1760 if r == next_rev[0]:
1762 # If the last rev we looked at was the one just previous,
1761 # If the last rev we looked at was the one just previous,
1763 # we only need to see a diff.
1762 # we only need to see a diff.
1764 deltamf = mnfst.readdelta(mnfstnode)
1763 deltamf = mnfst.readdelta(mnfstnode)
1765 # For each line in the delta
1764 # For each line in the delta
1766 for f, fnode in deltamf.iteritems():
1765 for f, fnode in deltamf.iteritems():
1767 f = changedfiles.get(f, None)
1766 f = changedfiles.get(f, None)
1768 # And if the file is in the list of files we care
1767 # And if the file is in the list of files we care
1769 # about.
1768 # about.
1770 if f is not None:
1769 if f is not None:
1771 # Get the changenode this manifest belongs to
1770 # Get the changenode this manifest belongs to
1772 clnode = msng_mnfst_set[mnfstnode]
1771 clnode = msng_mnfst_set[mnfstnode]
1773 # Create the set of filenodes for the file if
1772 # Create the set of filenodes for the file if
1774 # there isn't one already.
1773 # there isn't one already.
1775 ndset = msng_filenode_set.setdefault(f, {})
1774 ndset = msng_filenode_set.setdefault(f, {})
1776 # And set the filenode's changelog node to the
1775 # And set the filenode's changelog node to the
1777 # manifest's if it hasn't been set already.
1776 # manifest's if it hasn't been set already.
1778 ndset.setdefault(fnode, clnode)
1777 ndset.setdefault(fnode, clnode)
1779 else:
1778 else:
1780 # Otherwise we need a full manifest.
1779 # Otherwise we need a full manifest.
1781 m = mnfst.read(mnfstnode)
1780 m = mnfst.read(mnfstnode)
1782 # For every file in we care about.
1781 # For every file in we care about.
1783 for f in changedfiles:
1782 for f in changedfiles:
1784 fnode = m.get(f, None)
1783 fnode = m.get(f, None)
1785 # If it's in the manifest
1784 # If it's in the manifest
1786 if fnode is not None:
1785 if fnode is not None:
1787 # See comments above.
1786 # See comments above.
1788 clnode = msng_mnfst_set[mnfstnode]
1787 clnode = msng_mnfst_set[mnfstnode]
1789 ndset = msng_filenode_set.setdefault(f, {})
1788 ndset = msng_filenode_set.setdefault(f, {})
1790 ndset.setdefault(fnode, clnode)
1789 ndset.setdefault(fnode, clnode)
1791 # Remember the revision we hope to see next.
1790 # Remember the revision we hope to see next.
1792 next_rev[0] = r + 1
1791 next_rev[0] = r + 1
1793 return collect_msng_filenodes
1792 return collect_msng_filenodes
1794
1793
1795 # We have a list of filenodes we think we need for a file, lets remove
1794 # We have a list of filenodes we think we need for a file, lets remove
1796 # all those we know the recipient must have.
1795 # all those we know the recipient must have.
1797 def prune_filenodes(f, filerevlog):
1796 def prune_filenodes(f, filerevlog):
1798 msngset = msng_filenode_set[f]
1797 msngset = msng_filenode_set[f]
1799 hasset = set()
1798 hasset = set()
1800 # If a 'missing' filenode thinks it belongs to a changenode we
1799 # If a 'missing' filenode thinks it belongs to a changenode we
1801 # assume the recipient must have, then the recipient must have
1800 # assume the recipient must have, then the recipient must have
1802 # that filenode.
1801 # that filenode.
1803 for n in msngset:
1802 for n in msngset:
1804 clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
1803 clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
1805 if clnode in has_cl_set:
1804 if clnode in has_cl_set:
1806 hasset.add(n)
1805 hasset.add(n)
1807 prune_parents(filerevlog, hasset, msngset)
1806 prune_parents(filerevlog, hasset, msngset)
1808
1807
1809 # A function generator function that sets up the a context for the
1808 # A function generator function that sets up the a context for the
1810 # inner function.
1809 # inner function.
1811 def lookup_filenode_link_func(fname):
1810 def lookup_filenode_link_func(fname):
1812 msngset = msng_filenode_set[fname]
1811 msngset = msng_filenode_set[fname]
1813 # Lookup the changenode the filenode belongs to.
1812 # Lookup the changenode the filenode belongs to.
1814 def lookup_filenode_link(fnode):
1813 def lookup_filenode_link(fnode):
1815 return msngset[fnode]
1814 return msngset[fnode]
1816 return lookup_filenode_link
1815 return lookup_filenode_link
1817
1816
1818 # Add the nodes that were explicitly requested.
1817 # Add the nodes that were explicitly requested.
1819 def add_extra_nodes(name, nodes):
1818 def add_extra_nodes(name, nodes):
1820 if not extranodes or name not in extranodes:
1819 if not extranodes or name not in extranodes:
1821 return
1820 return
1822
1821
1823 for node, linknode in extranodes[name]:
1822 for node, linknode in extranodes[name]:
1824 if node not in nodes:
1823 if node not in nodes:
1825 nodes[node] = linknode
1824 nodes[node] = linknode
1826
1825
1827 # Now that we have all theses utility functions to help out and
1826 # Now that we have all theses utility functions to help out and
1828 # logically divide up the task, generate the group.
1827 # logically divide up the task, generate the group.
1829 def gengroup():
1828 def gengroup():
1830 # The set of changed files starts empty.
1829 # The set of changed files starts empty.
1831 changedfiles = {}
1830 changedfiles = {}
1832 # Create a changenode group generator that will call our functions
1831 # Create a changenode group generator that will call our functions
1833 # back to lookup the owning changenode and collect information.
1832 # back to lookup the owning changenode and collect information.
1834 group = cl.group(msng_cl_lst, identity,
1833 group = cl.group(msng_cl_lst, identity,
1835 manifest_and_file_collector(changedfiles))
1834 manifest_and_file_collector(changedfiles))
1836 for chnk in group:
1835 for chnk in group:
1837 yield chnk
1836 yield chnk
1838
1837
1839 # The list of manifests has been collected by the generator
1838 # The list of manifests has been collected by the generator
1840 # calling our functions back.
1839 # calling our functions back.
1841 prune_manifests()
1840 prune_manifests()
1842 add_extra_nodes(1, msng_mnfst_set)
1841 add_extra_nodes(1, msng_mnfst_set)
1843 msng_mnfst_lst = msng_mnfst_set.keys()
1842 msng_mnfst_lst = msng_mnfst_set.keys()
1844 # Sort the manifestnodes by revision number.
1843 # Sort the manifestnodes by revision number.
1845 msng_mnfst_lst.sort(key=mnfst.rev)
1844 msng_mnfst_lst.sort(key=mnfst.rev)
1846 # Create a generator for the manifestnodes that calls our lookup
1845 # Create a generator for the manifestnodes that calls our lookup
1847 # and data collection functions back.
1846 # and data collection functions back.
1848 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1847 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1849 filenode_collector(changedfiles))
1848 filenode_collector(changedfiles))
1850 for chnk in group:
1849 for chnk in group:
1851 yield chnk
1850 yield chnk
1852
1851
1853 # These are no longer needed, dereference and toss the memory for
1852 # These are no longer needed, dereference and toss the memory for
1854 # them.
1853 # them.
1855 msng_mnfst_lst = None
1854 msng_mnfst_lst = None
1856 msng_mnfst_set.clear()
1855 msng_mnfst_set.clear()
1857
1856
1858 if extranodes:
1857 if extranodes:
1859 for fname in extranodes:
1858 for fname in extranodes:
1860 if isinstance(fname, int):
1859 if isinstance(fname, int):
1861 continue
1860 continue
1862 msng_filenode_set.setdefault(fname, {})
1861 msng_filenode_set.setdefault(fname, {})
1863 changedfiles[fname] = 1
1862 changedfiles[fname] = 1
1864 # Go through all our files in order sorted by name.
1863 # Go through all our files in order sorted by name.
1865 for fname in sorted(changedfiles):
1864 for fname in sorted(changedfiles):
1866 filerevlog = self.file(fname)
1865 filerevlog = self.file(fname)
1867 if not len(filerevlog):
1866 if not len(filerevlog):
1868 raise util.Abort(_("empty or missing revlog for %s") % fname)
1867 raise util.Abort(_("empty or missing revlog for %s") % fname)
1869 # Toss out the filenodes that the recipient isn't really
1868 # Toss out the filenodes that the recipient isn't really
1870 # missing.
1869 # missing.
1871 if fname in msng_filenode_set:
1870 if fname in msng_filenode_set:
1872 prune_filenodes(fname, filerevlog)
1871 prune_filenodes(fname, filerevlog)
1873 add_extra_nodes(fname, msng_filenode_set[fname])
1872 add_extra_nodes(fname, msng_filenode_set[fname])
1874 msng_filenode_lst = msng_filenode_set[fname].keys()
1873 msng_filenode_lst = msng_filenode_set[fname].keys()
1875 else:
1874 else:
1876 msng_filenode_lst = []
1875 msng_filenode_lst = []
1877 # If any filenodes are left, generate the group for them,
1876 # If any filenodes are left, generate the group for them,
1878 # otherwise don't bother.
1877 # otherwise don't bother.
1879 if len(msng_filenode_lst) > 0:
1878 if len(msng_filenode_lst) > 0:
1880 yield changegroup.chunkheader(len(fname))
1879 yield changegroup.chunkheader(len(fname))
1881 yield fname
1880 yield fname
1882 # Sort the filenodes by their revision #
1881 # Sort the filenodes by their revision #
1883 msng_filenode_lst.sort(key=filerevlog.rev)
1882 msng_filenode_lst.sort(key=filerevlog.rev)
1884 # Create a group generator and only pass in a changenode
1883 # Create a group generator and only pass in a changenode
1885 # lookup function as we need to collect no information
1884 # lookup function as we need to collect no information
1886 # from filenodes.
1885 # from filenodes.
1887 group = filerevlog.group(msng_filenode_lst,
1886 group = filerevlog.group(msng_filenode_lst,
1888 lookup_filenode_link_func(fname))
1887 lookup_filenode_link_func(fname))
1889 for chnk in group:
1888 for chnk in group:
1890 yield chnk
1889 yield chnk
1891 if fname in msng_filenode_set:
1890 if fname in msng_filenode_set:
1892 # Don't need this anymore, toss it to free memory.
1891 # Don't need this anymore, toss it to free memory.
1893 del msng_filenode_set[fname]
1892 del msng_filenode_set[fname]
1894 # Signal that no more groups are left.
1893 # Signal that no more groups are left.
1895 yield changegroup.closechunk()
1894 yield changegroup.closechunk()
1896
1895
1897 if msng_cl_lst:
1896 if msng_cl_lst:
1898 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1897 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1899
1898
1900 return util.chunkbuffer(gengroup())
1899 return util.chunkbuffer(gengroup())
1901
1900
1902 def changegroup(self, basenodes, source):
1901 def changegroup(self, basenodes, source):
1903 # to avoid a race we use changegroupsubset() (issue1320)
1902 # to avoid a race we use changegroupsubset() (issue1320)
1904 return self.changegroupsubset(basenodes, self.heads(), source)
1903 return self.changegroupsubset(basenodes, self.heads(), source)
1905
1904
1906 def _changegroup(self, common, source):
1905 def _changegroup(self, nodes, source):
1907 """Compute the changegroup of all nodes that we have that a recipient
1906 """Compute the changegroup of all nodes that we have that a recipient
1908 doesn't. Return a chunkbuffer object whose read() method will return
1907 doesn't. Return a chunkbuffer object whose read() method will return
1909 successive changegroup chunks.
1908 successive changegroup chunks.
1910
1909
1911 This is much easier than the previous function as we can assume that
1910 This is much easier than the previous function as we can assume that
1912 the recipient has any changenode we aren't sending them.
1911 the recipient has any changenode we aren't sending them.
1913
1912
1914 common is the set of common nodes between remote and self"""
1913 nodes is the set of nodes to send"""
1915
1914
1916 self.hook('preoutgoing', throw=True, source=source)
1915 self.hook('preoutgoing', throw=True, source=source)
1917
1916
1918 cl = self.changelog
1917 cl = self.changelog
1919 nodes = cl.findmissing(common)
1920 revset = set([cl.rev(n) for n in nodes])
1918 revset = set([cl.rev(n) for n in nodes])
1921 self.changegroupinfo(nodes, source)
1919 self.changegroupinfo(nodes, source)
1922
1920
1923 def identity(x):
1921 def identity(x):
1924 return x
1922 return x
1925
1923
1926 def gennodelst(log):
1924 def gennodelst(log):
1927 for r in log:
1925 for r in log:
1928 if log.linkrev(r) in revset:
1926 if log.linkrev(r) in revset:
1929 yield log.node(r)
1927 yield log.node(r)
1930
1928
1931 def changed_file_collector(changedfileset):
1929 def changed_file_collector(changedfileset):
1932 def collect_changed_files(clnode):
1930 def collect_changed_files(clnode):
1933 c = cl.read(clnode)
1931 c = cl.read(clnode)
1934 changedfileset.update(c[3])
1932 changedfileset.update(c[3])
1935 return collect_changed_files
1933 return collect_changed_files
1936
1934
1937 def lookuprevlink_func(revlog):
1935 def lookuprevlink_func(revlog):
1938 def lookuprevlink(n):
1936 def lookuprevlink(n):
1939 return cl.node(revlog.linkrev(revlog.rev(n)))
1937 return cl.node(revlog.linkrev(revlog.rev(n)))
1940 return lookuprevlink
1938 return lookuprevlink
1941
1939
1942 def gengroup():
1940 def gengroup():
1943 '''yield a sequence of changegroup chunks (strings)'''
1941 '''yield a sequence of changegroup chunks (strings)'''
1944 # construct a list of all changed files
1942 # construct a list of all changed files
1945 changedfiles = set()
1943 changedfiles = set()
1946
1944
1947 for chnk in cl.group(nodes, identity,
1945 for chnk in cl.group(nodes, identity,
1948 changed_file_collector(changedfiles)):
1946 changed_file_collector(changedfiles)):
1949 yield chnk
1947 yield chnk
1950
1948
1951 mnfst = self.manifest
1949 mnfst = self.manifest
1952 nodeiter = gennodelst(mnfst)
1950 nodeiter = gennodelst(mnfst)
1953 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1951 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1954 yield chnk
1952 yield chnk
1955
1953
1956 for fname in sorted(changedfiles):
1954 for fname in sorted(changedfiles):
1957 filerevlog = self.file(fname)
1955 filerevlog = self.file(fname)
1958 if not len(filerevlog):
1956 if not len(filerevlog):
1959 raise util.Abort(_("empty or missing revlog for %s") % fname)
1957 raise util.Abort(_("empty or missing revlog for %s") % fname)
1960 nodeiter = gennodelst(filerevlog)
1958 nodeiter = gennodelst(filerevlog)
1961 nodeiter = list(nodeiter)
1959 nodeiter = list(nodeiter)
1962 if nodeiter:
1960 if nodeiter:
1963 yield changegroup.chunkheader(len(fname))
1961 yield changegroup.chunkheader(len(fname))
1964 yield fname
1962 yield fname
1965 lookup = lookuprevlink_func(filerevlog)
1963 lookup = lookuprevlink_func(filerevlog)
1966 for chnk in filerevlog.group(nodeiter, lookup):
1964 for chnk in filerevlog.group(nodeiter, lookup):
1967 yield chnk
1965 yield chnk
1968
1966
1969 yield changegroup.closechunk()
1967 yield changegroup.closechunk()
1970
1968
1971 if nodes:
1969 if nodes:
1972 self.hook('outgoing', node=hex(nodes[0]), source=source)
1970 self.hook('outgoing', node=hex(nodes[0]), source=source)
1973
1971
1974 return util.chunkbuffer(gengroup())
1972 return util.chunkbuffer(gengroup())
1975
1973
1976 def addchangegroup(self, source, srctype, url, emptyok=False):
1974 def addchangegroup(self, source, srctype, url, emptyok=False):
1977 """add changegroup to repo.
1975 """add changegroup to repo.
1978
1976
1979 return values:
1977 return values:
1980 - nothing changed or no source: 0
1978 - nothing changed or no source: 0
1981 - more heads than before: 1+added heads (2..n)
1979 - more heads than before: 1+added heads (2..n)
1982 - less heads than before: -1-removed heads (-2..-n)
1980 - less heads than before: -1-removed heads (-2..-n)
1983 - number of heads stays the same: 1
1981 - number of heads stays the same: 1
1984 """
1982 """
1985 def csmap(x):
1983 def csmap(x):
1986 self.ui.debug("add changeset %s\n" % short(x))
1984 self.ui.debug("add changeset %s\n" % short(x))
1987 return len(cl)
1985 return len(cl)
1988
1986
1989 def revmap(x):
1987 def revmap(x):
1990 return cl.rev(x)
1988 return cl.rev(x)
1991
1989
1992 if not source:
1990 if not source:
1993 return 0
1991 return 0
1994
1992
1995 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1993 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1996
1994
1997 changesets = files = revisions = 0
1995 changesets = files = revisions = 0
1998
1996
1999 # write changelog data to temp files so concurrent readers will not see
1997 # write changelog data to temp files so concurrent readers will not see
2000 # inconsistent view
1998 # inconsistent view
2001 cl = self.changelog
1999 cl = self.changelog
2002 cl.delayupdate()
2000 cl.delayupdate()
2003 oldheads = len(cl.heads())
2001 oldheads = len(cl.heads())
2004
2002
2005 tr = self.transaction()
2003 tr = self.transaction()
2006 try:
2004 try:
2007 trp = weakref.proxy(tr)
2005 trp = weakref.proxy(tr)
2008 # pull off the changeset group
2006 # pull off the changeset group
2009 self.ui.status(_("adding changesets\n"))
2007 self.ui.status(_("adding changesets\n"))
2010 clstart = len(cl)
2008 clstart = len(cl)
2011 chunkiter = changegroup.chunkiter(source)
2009 chunkiter = changegroup.chunkiter(source)
2012 if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
2010 if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
2013 raise util.Abort(_("received changelog group is empty"))
2011 raise util.Abort(_("received changelog group is empty"))
2014 clend = len(cl)
2012 clend = len(cl)
2015 changesets = clend - clstart
2013 changesets = clend - clstart
2016
2014
2017 # pull off the manifest group
2015 # pull off the manifest group
2018 self.ui.status(_("adding manifests\n"))
2016 self.ui.status(_("adding manifests\n"))
2019 chunkiter = changegroup.chunkiter(source)
2017 chunkiter = changegroup.chunkiter(source)
2020 # no need to check for empty manifest group here:
2018 # no need to check for empty manifest group here:
2021 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2019 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2022 # no new manifest will be created and the manifest group will
2020 # no new manifest will be created and the manifest group will
2023 # be empty during the pull
2021 # be empty during the pull
2024 self.manifest.addgroup(chunkiter, revmap, trp)
2022 self.manifest.addgroup(chunkiter, revmap, trp)
2025
2023
2026 # process the files
2024 # process the files
2027 self.ui.status(_("adding file changes\n"))
2025 self.ui.status(_("adding file changes\n"))
2028 while 1:
2026 while 1:
2029 f = changegroup.getchunk(source)
2027 f = changegroup.getchunk(source)
2030 if not f:
2028 if not f:
2031 break
2029 break
2032 self.ui.debug("adding %s revisions\n" % f)
2030 self.ui.debug("adding %s revisions\n" % f)
2033 fl = self.file(f)
2031 fl = self.file(f)
2034 o = len(fl)
2032 o = len(fl)
2035 chunkiter = changegroup.chunkiter(source)
2033 chunkiter = changegroup.chunkiter(source)
2036 if fl.addgroup(chunkiter, revmap, trp) is None:
2034 if fl.addgroup(chunkiter, revmap, trp) is None:
2037 raise util.Abort(_("received file revlog group is empty"))
2035 raise util.Abort(_("received file revlog group is empty"))
2038 revisions += len(fl) - o
2036 revisions += len(fl) - o
2039 files += 1
2037 files += 1
2040
2038
2041 newheads = len(cl.heads())
2039 newheads = len(cl.heads())
2042 heads = ""
2040 heads = ""
2043 if oldheads and newheads != oldheads:
2041 if oldheads and newheads != oldheads:
2044 heads = _(" (%+d heads)") % (newheads - oldheads)
2042 heads = _(" (%+d heads)") % (newheads - oldheads)
2045
2043
2046 self.ui.status(_("added %d changesets"
2044 self.ui.status(_("added %d changesets"
2047 " with %d changes to %d files%s\n")
2045 " with %d changes to %d files%s\n")
2048 % (changesets, revisions, files, heads))
2046 % (changesets, revisions, files, heads))
2049
2047
2050 if changesets > 0:
2048 if changesets > 0:
2051 p = lambda: cl.writepending() and self.root or ""
2049 p = lambda: cl.writepending() and self.root or ""
2052 self.hook('pretxnchangegroup', throw=True,
2050 self.hook('pretxnchangegroup', throw=True,
2053 node=hex(cl.node(clstart)), source=srctype,
2051 node=hex(cl.node(clstart)), source=srctype,
2054 url=url, pending=p)
2052 url=url, pending=p)
2055
2053
2056 # make changelog see real files again
2054 # make changelog see real files again
2057 cl.finalize(trp)
2055 cl.finalize(trp)
2058
2056
2059 tr.close()
2057 tr.close()
2060 finally:
2058 finally:
2061 del tr
2059 del tr
2062
2060
2063 if changesets > 0:
2061 if changesets > 0:
2064 # forcefully update the on-disk branch cache
2062 # forcefully update the on-disk branch cache
2065 self.ui.debug("updating the branch cache\n")
2063 self.ui.debug("updating the branch cache\n")
2066 self.branchtags()
2064 self.branchtags()
2067 self.hook("changegroup", node=hex(cl.node(clstart)),
2065 self.hook("changegroup", node=hex(cl.node(clstart)),
2068 source=srctype, url=url)
2066 source=srctype, url=url)
2069
2067
2070 for i in xrange(clstart, clend):
2068 for i in xrange(clstart, clend):
2071 self.hook("incoming", node=hex(cl.node(i)),
2069 self.hook("incoming", node=hex(cl.node(i)),
2072 source=srctype, url=url)
2070 source=srctype, url=url)
2073
2071
2074 # never return 0 here:
2072 # never return 0 here:
2075 if newheads < oldheads:
2073 if newheads < oldheads:
2076 return newheads - oldheads - 1
2074 return newheads - oldheads - 1
2077 else:
2075 else:
2078 return newheads - oldheads + 1
2076 return newheads - oldheads + 1
2079
2077
2080
2078
2081 def stream_in(self, remote):
2079 def stream_in(self, remote):
2082 fp = remote.stream_out()
2080 fp = remote.stream_out()
2083 l = fp.readline()
2081 l = fp.readline()
2084 try:
2082 try:
2085 resp = int(l)
2083 resp = int(l)
2086 except ValueError:
2084 except ValueError:
2087 raise error.ResponseError(
2085 raise error.ResponseError(
2088 _('Unexpected response from remote server:'), l)
2086 _('Unexpected response from remote server:'), l)
2089 if resp == 1:
2087 if resp == 1:
2090 raise util.Abort(_('operation forbidden by server'))
2088 raise util.Abort(_('operation forbidden by server'))
2091 elif resp == 2:
2089 elif resp == 2:
2092 raise util.Abort(_('locking the remote repository failed'))
2090 raise util.Abort(_('locking the remote repository failed'))
2093 elif resp != 0:
2091 elif resp != 0:
2094 raise util.Abort(_('the server sent an unknown error code'))
2092 raise util.Abort(_('the server sent an unknown error code'))
2095 self.ui.status(_('streaming all changes\n'))
2093 self.ui.status(_('streaming all changes\n'))
2096 l = fp.readline()
2094 l = fp.readline()
2097 try:
2095 try:
2098 total_files, total_bytes = map(int, l.split(' ', 1))
2096 total_files, total_bytes = map(int, l.split(' ', 1))
2099 except (ValueError, TypeError):
2097 except (ValueError, TypeError):
2100 raise error.ResponseError(
2098 raise error.ResponseError(
2101 _('Unexpected response from remote server:'), l)
2099 _('Unexpected response from remote server:'), l)
2102 self.ui.status(_('%d files to transfer, %s of data\n') %
2100 self.ui.status(_('%d files to transfer, %s of data\n') %
2103 (total_files, util.bytecount(total_bytes)))
2101 (total_files, util.bytecount(total_bytes)))
2104 start = time.time()
2102 start = time.time()
2105 for i in xrange(total_files):
2103 for i in xrange(total_files):
2106 # XXX doesn't support '\n' or '\r' in filenames
2104 # XXX doesn't support '\n' or '\r' in filenames
2107 l = fp.readline()
2105 l = fp.readline()
2108 try:
2106 try:
2109 name, size = l.split('\0', 1)
2107 name, size = l.split('\0', 1)
2110 size = int(size)
2108 size = int(size)
2111 except (ValueError, TypeError):
2109 except (ValueError, TypeError):
2112 raise error.ResponseError(
2110 raise error.ResponseError(
2113 _('Unexpected response from remote server:'), l)
2111 _('Unexpected response from remote server:'), l)
2114 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
2112 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
2115 # for backwards compat, name was partially encoded
2113 # for backwards compat, name was partially encoded
2116 ofp = self.sopener(store.decodedir(name), 'w')
2114 ofp = self.sopener(store.decodedir(name), 'w')
2117 for chunk in util.filechunkiter(fp, limit=size):
2115 for chunk in util.filechunkiter(fp, limit=size):
2118 ofp.write(chunk)
2116 ofp.write(chunk)
2119 ofp.close()
2117 ofp.close()
2120 elapsed = time.time() - start
2118 elapsed = time.time() - start
2121 if elapsed <= 0:
2119 if elapsed <= 0:
2122 elapsed = 0.001
2120 elapsed = 0.001
2123 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2121 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2124 (util.bytecount(total_bytes), elapsed,
2122 (util.bytecount(total_bytes), elapsed,
2125 util.bytecount(total_bytes / elapsed)))
2123 util.bytecount(total_bytes / elapsed)))
2126 self.invalidate()
2124 self.invalidate()
2127 return len(self.heads()) + 1
2125 return len(self.heads()) + 1
2128
2126
2129 def clone(self, remote, heads=[], stream=False):
2127 def clone(self, remote, heads=[], stream=False):
2130 '''clone remote repository.
2128 '''clone remote repository.
2131
2129
2132 keyword arguments:
2130 keyword arguments:
2133 heads: list of revs to clone (forces use of pull)
2131 heads: list of revs to clone (forces use of pull)
2134 stream: use streaming clone if possible'''
2132 stream: use streaming clone if possible'''
2135
2133
2136 # now, all clients that can request uncompressed clones can
2134 # now, all clients that can request uncompressed clones can
2137 # read repo formats supported by all servers that can serve
2135 # read repo formats supported by all servers that can serve
2138 # them.
2136 # them.
2139
2137
2140 # if revlog format changes, client will have to check version
2138 # if revlog format changes, client will have to check version
2141 # and format flags on "stream" capability, and use
2139 # and format flags on "stream" capability, and use
2142 # uncompressed only if compatible.
2140 # uncompressed only if compatible.
2143
2141
2144 if stream and not heads and remote.capable('stream'):
2142 if stream and not heads and remote.capable('stream'):
2145 return self.stream_in(remote)
2143 return self.stream_in(remote)
2146 return self.pull(remote, heads)
2144 return self.pull(remote, heads)
2147
2145
2148 # used to avoid circular references so destructors work
2146 # used to avoid circular references so destructors work
2149 def aftertrans(files):
2147 def aftertrans(files):
2150 renamefiles = [tuple(t) for t in files]
2148 renamefiles = [tuple(t) for t in files]
2151 def a():
2149 def a():
2152 for src, dest in renamefiles:
2150 for src, dest in renamefiles:
2153 util.rename(src, dest)
2151 util.rename(src, dest)
2154 return a
2152 return a
2155
2153
2156 def instance(ui, path, create):
2154 def instance(ui, path, create):
2157 return localrepository(ui, util.drop_scheme('file', path), create)
2155 return localrepository(ui, util.drop_scheme('file', path), create)
2158
2156
2159 def islocal(path):
2157 def islocal(path):
2160 return True
2158 return True
@@ -1,149 +1,168
1 #!/bin/sh
1 #!/bin/sh
2
2
3 cp "$TESTDIR"/printenv.py .
3 cp "$TESTDIR"/printenv.py .
4
4
5 echo "====== Setting up test"
5 echo "====== Setting up test"
6 hg init test
6 hg init test
7 cd test
7 cd test
8 echo 0 > afile
8 echo 0 > afile
9 hg add afile
9 hg add afile
10 hg commit -m "0.0" -d "1000000 0"
10 hg commit -m "0.0" -d "1000000 0"
11 echo 1 >> afile
11 echo 1 >> afile
12 hg commit -m "0.1" -d "1000000 0"
12 hg commit -m "0.1" -d "1000000 0"
13 echo 2 >> afile
13 echo 2 >> afile
14 hg commit -m "0.2" -d "1000000 0"
14 hg commit -m "0.2" -d "1000000 0"
15 echo 3 >> afile
15 echo 3 >> afile
16 hg commit -m "0.3" -d "1000000 0"
16 hg commit -m "0.3" -d "1000000 0"
17 hg update -C 0
17 hg update -C 0
18 echo 1 >> afile
18 echo 1 >> afile
19 hg commit -m "1.1" -d "1000000 0"
19 hg commit -m "1.1" -d "1000000 0"
20 echo 2 >> afile
20 echo 2 >> afile
21 hg commit -m "1.2" -d "1000000 0"
21 hg commit -m "1.2" -d "1000000 0"
22 echo "a line" > fred
22 echo "a line" > fred
23 echo 3 >> afile
23 echo 3 >> afile
24 hg add fred
24 hg add fred
25 hg commit -m "1.3" -d "1000000 0"
25 hg commit -m "1.3" -d "1000000 0"
26 hg mv afile adifferentfile
26 hg mv afile adifferentfile
27 hg commit -m "1.3m" -d "1000000 0"
27 hg commit -m "1.3m" -d "1000000 0"
28 hg update -C 3
28 hg update -C 3
29 hg mv afile anotherfile
29 hg mv afile anotherfile
30 hg commit -m "0.3m" -d "1000000 0"
30 hg commit -m "0.3m" -d "1000000 0"
31 hg verify
31 hg verify
32 cd ..
32 cd ..
33 hg init empty
33 hg init empty
34
34
35 echo "====== Bundle --all"
35 echo "====== Bundle --all"
36 hg -R test bundle --all all.hg
36 hg -R test bundle --all all.hg
37
37
38 echo "====== Bundle test to full.hg"
38 echo "====== Bundle test to full.hg"
39 hg -R test bundle full.hg empty
39 hg -R test bundle full.hg empty
40 echo "====== Unbundle full.hg in test"
40 echo "====== Unbundle full.hg in test"
41 hg -R test unbundle full.hg
41 hg -R test unbundle full.hg
42 echo "====== Verify empty"
42 echo "====== Verify empty"
43 hg -R empty heads
43 hg -R empty heads
44 hg -R empty verify
44 hg -R empty verify
45
45
46 echo "====== Pull full.hg into test (using --cwd)"
46 echo "====== Pull full.hg into test (using --cwd)"
47 hg --cwd test pull ../full.hg
47 hg --cwd test pull ../full.hg
48 echo "====== Pull full.hg into empty (using --cwd)"
48 echo "====== Pull full.hg into empty (using --cwd)"
49 hg --cwd empty pull ../full.hg
49 hg --cwd empty pull ../full.hg
50 echo "====== Rollback empty"
50 echo "====== Rollback empty"
51 hg -R empty rollback
51 hg -R empty rollback
52 echo "====== Pull full.hg into empty again (using --cwd)"
52 echo "====== Pull full.hg into empty again (using --cwd)"
53 hg --cwd empty pull ../full.hg
53 hg --cwd empty pull ../full.hg
54
54
55 echo "====== Pull full.hg into test (using -R)"
55 echo "====== Pull full.hg into test (using -R)"
56 hg -R test pull full.hg
56 hg -R test pull full.hg
57 echo "====== Pull full.hg into empty (using -R)"
57 echo "====== Pull full.hg into empty (using -R)"
58 hg -R empty pull full.hg
58 hg -R empty pull full.hg
59 echo "====== Rollback empty"
59 echo "====== Rollback empty"
60 hg -R empty rollback
60 hg -R empty rollback
61 echo "====== Pull full.hg into empty again (using -R)"
61 echo "====== Pull full.hg into empty again (using -R)"
62 hg -R empty pull full.hg
62 hg -R empty pull full.hg
63
63
64 echo "====== Log -R full.hg in fresh empty"
64 echo "====== Log -R full.hg in fresh empty"
65 rm -r empty
65 rm -r empty
66 hg init empty
66 hg init empty
67 cd empty
67 cd empty
68 hg -R bundle://../full.hg log
68 hg -R bundle://../full.hg log
69
69
70 echo "====== Pull ../full.hg into empty (with hook)"
70 echo "====== Pull ../full.hg into empty (with hook)"
71 echo '[hooks]' >> .hg/hgrc
71 echo '[hooks]' >> .hg/hgrc
72 echo 'changegroup = python ../printenv.py changegroup' >> .hg/hgrc
72 echo 'changegroup = python ../printenv.py changegroup' >> .hg/hgrc
73 #doesn't work (yet ?)
73 #doesn't work (yet ?)
74 #hg -R bundle://../full.hg verify
74 #hg -R bundle://../full.hg verify
75 hg pull bundle://../full.hg
75 hg pull bundle://../full.hg
76 echo "====== Rollback empty"
76 echo "====== Rollback empty"
77 hg rollback
77 hg rollback
78 cd ..
78 cd ..
79 echo "====== Log -R bundle:empty+full.hg"
79 echo "====== Log -R bundle:empty+full.hg"
80 hg -R bundle:empty+full.hg log --template="{rev} "
80 hg -R bundle:empty+full.hg log --template="{rev} "
81 echo ""
81 echo ""
82 echo "====== Pull full.hg into empty again (using -R; with hook)"
82 echo "====== Pull full.hg into empty again (using -R; with hook)"
83 hg -R empty pull full.hg
83 hg -R empty pull full.hg
84
84
85 echo "====== Create partial clones"
85 echo "====== Create partial clones"
86 rm -r empty
86 rm -r empty
87 hg init empty
87 hg init empty
88 hg clone -r 3 test partial
88 hg clone -r 3 test partial
89 hg clone partial partial2
89 hg clone partial partial2
90 cd partial
90 cd partial
91 echo "====== Log -R full.hg in partial"
91 echo "====== Log -R full.hg in partial"
92 hg -R bundle://../full.hg log
92 hg -R bundle://../full.hg log
93 echo "====== Incoming full.hg in partial"
93 echo "====== Incoming full.hg in partial"
94 hg incoming bundle://../full.hg
94 hg incoming bundle://../full.hg
95 echo "====== Outgoing -R full.hg vs partial2 in partial"
95 echo "====== Outgoing -R full.hg vs partial2 in partial"
96 hg -R bundle://../full.hg outgoing ../partial2
96 hg -R bundle://../full.hg outgoing ../partial2
97 echo "====== Outgoing -R does-not-exist.hg vs partial2 in partial"
97 echo "====== Outgoing -R does-not-exist.hg vs partial2 in partial"
98 hg -R bundle://../does-not-exist.hg outgoing ../partial2
98 hg -R bundle://../does-not-exist.hg outgoing ../partial2
99 cd ..
99 cd ..
100
100
101 echo "====== Direct clone from bundle (all-history)"
101 echo "====== Direct clone from bundle (all-history)"
102 hg clone full.hg full-clone
102 hg clone full.hg full-clone
103 hg -R full-clone heads
103 hg -R full-clone heads
104 rm -r full-clone
104 rm -r full-clone
105
105
106 # test for http://mercurial.selenic.com/bts/issue216
106 # test for http://mercurial.selenic.com/bts/issue216
107 echo "====== Unbundle incremental bundles into fresh empty in one go"
107 echo "====== Unbundle incremental bundles into fresh empty in one go"
108 rm -r empty
108 rm -r empty
109 hg init empty
109 hg init empty
110 hg -R test bundle --base null -r 0 ../0.hg
110 hg -R test bundle --base null -r 0 ../0.hg
111 hg -R test bundle --base 0 -r 1 ../1.hg
111 hg -R test bundle --base 0 -r 1 ../1.hg
112 hg -R empty unbundle -u ../0.hg ../1.hg
112 hg -R empty unbundle -u ../0.hg ../1.hg
113
113
114 # test for 540d1059c802
114 # test for 540d1059c802
115 echo "====== test for 540d1059c802"
115 echo "====== test for 540d1059c802"
116 hg init orig
116 hg init orig
117 cd orig
117 cd orig
118 echo foo > foo
118 echo foo > foo
119 hg add foo
119 hg add foo
120 hg ci -m 'add foo'
120 hg ci -m 'add foo'
121
121
122 hg clone . ../copy
122 hg clone . ../copy
123 hg tag foo
123 hg tag foo
124
124
125 cd ../copy
125 cd ../copy
126 echo >> foo
126 echo >> foo
127 hg ci -m 'change foo'
127 hg ci -m 'change foo'
128 hg bundle ../bundle.hg ../orig
128 hg bundle ../bundle.hg ../orig
129
129
130 cd ../orig
130 cd ../orig
131 hg incoming ../bundle.hg
131 hg incoming ../bundle.hg
132 cd ..
132 cd ..
133
133
134 # test for http://mercurial.selenic.com/bts/issue1144
134 # test for http://mercurial.selenic.com/bts/issue1144
135 echo "===== test that verify bundle does not traceback"
135 echo "===== test that verify bundle does not traceback"
136 # partial history bundle, fails w/ unkown parent
136 # partial history bundle, fails w/ unkown parent
137 hg -R bundle.hg verify
137 hg -R bundle.hg verify
138 # full history bundle, refuses to verify non-local repo
138 # full history bundle, refuses to verify non-local repo
139 hg -R all.hg verify
139 hg -R all.hg verify
140 # but, regular verify must continue to work
140 # but, regular verify must continue to work
141 hg -R orig verify
141 hg -R orig verify
142
142
143 echo "====== diff against bundle"
143 echo "====== diff against bundle"
144 hg init b
144 hg init b
145 cd b
145 cd b
146 hg -R ../all.hg diff -r tip
146 hg -R ../all.hg diff -r tip
147 cd ..
147 cd ..
148
148
149 echo "====== bundle single branch"
150 hg init branchy
151 cd branchy
152 echo a >a
153 hg ci -Ama
154 echo b >b
155 hg ci -Amb
156 echo b1 >b1
157 hg ci -Amb1
158 hg up 0
159 echo c >c
160 hg ci -Amc
161 echo c1 >c1
162 hg ci -Amc1
163 hg clone -q .#tip part
164 echo "== bundling via incoming"
165 hg in -R part --bundle incoming.hg --template "{node}\n" .
166 echo "== bundling"
167 hg bundle bundle.hg part --debug
149
168
@@ -1,328 +1,348
1 ====== Setting up test
1 ====== Setting up test
2 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
2 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
3 created new head
3 created new head
4 1 files updated, 0 files merged, 2 files removed, 0 files unresolved
4 1 files updated, 0 files merged, 2 files removed, 0 files unresolved
5 checking changesets
5 checking changesets
6 checking manifests
6 checking manifests
7 crosschecking files in changesets and manifests
7 crosschecking files in changesets and manifests
8 checking files
8 checking files
9 4 files, 9 changesets, 7 total revisions
9 4 files, 9 changesets, 7 total revisions
10 ====== Bundle --all
10 ====== Bundle --all
11 9 changesets found
11 9 changesets found
12 ====== Bundle test to full.hg
12 ====== Bundle test to full.hg
13 searching for changes
13 searching for changes
14 9 changesets found
14 9 changesets found
15 ====== Unbundle full.hg in test
15 ====== Unbundle full.hg in test
16 adding changesets
16 adding changesets
17 adding manifests
17 adding manifests
18 adding file changes
18 adding file changes
19 added 0 changesets with 0 changes to 4 files
19 added 0 changesets with 0 changes to 4 files
20 (run 'hg update' to get a working copy)
20 (run 'hg update' to get a working copy)
21 ====== Verify empty
21 ====== Verify empty
22 changeset: -1:000000000000
22 changeset: -1:000000000000
23 tag: tip
23 tag: tip
24 user:
24 user:
25 date: Thu Jan 01 00:00:00 1970 +0000
25 date: Thu Jan 01 00:00:00 1970 +0000
26
26
27 checking changesets
27 checking changesets
28 checking manifests
28 checking manifests
29 crosschecking files in changesets and manifests
29 crosschecking files in changesets and manifests
30 checking files
30 checking files
31 0 files, 0 changesets, 0 total revisions
31 0 files, 0 changesets, 0 total revisions
32 ====== Pull full.hg into test (using --cwd)
32 ====== Pull full.hg into test (using --cwd)
33 pulling from ../full.hg
33 pulling from ../full.hg
34 searching for changes
34 searching for changes
35 no changes found
35 no changes found
36 ====== Pull full.hg into empty (using --cwd)
36 ====== Pull full.hg into empty (using --cwd)
37 pulling from ../full.hg
37 pulling from ../full.hg
38 requesting all changes
38 requesting all changes
39 adding changesets
39 adding changesets
40 adding manifests
40 adding manifests
41 adding file changes
41 adding file changes
42 added 9 changesets with 7 changes to 4 files (+1 heads)
42 added 9 changesets with 7 changes to 4 files (+1 heads)
43 (run 'hg heads' to see heads, 'hg merge' to merge)
43 (run 'hg heads' to see heads, 'hg merge' to merge)
44 ====== Rollback empty
44 ====== Rollback empty
45 rolling back last transaction
45 rolling back last transaction
46 ====== Pull full.hg into empty again (using --cwd)
46 ====== Pull full.hg into empty again (using --cwd)
47 pulling from ../full.hg
47 pulling from ../full.hg
48 requesting all changes
48 requesting all changes
49 adding changesets
49 adding changesets
50 adding manifests
50 adding manifests
51 adding file changes
51 adding file changes
52 added 9 changesets with 7 changes to 4 files (+1 heads)
52 added 9 changesets with 7 changes to 4 files (+1 heads)
53 (run 'hg heads' to see heads, 'hg merge' to merge)
53 (run 'hg heads' to see heads, 'hg merge' to merge)
54 ====== Pull full.hg into test (using -R)
54 ====== Pull full.hg into test (using -R)
55 pulling from full.hg
55 pulling from full.hg
56 searching for changes
56 searching for changes
57 no changes found
57 no changes found
58 ====== Pull full.hg into empty (using -R)
58 ====== Pull full.hg into empty (using -R)
59 pulling from full.hg
59 pulling from full.hg
60 searching for changes
60 searching for changes
61 no changes found
61 no changes found
62 ====== Rollback empty
62 ====== Rollback empty
63 rolling back last transaction
63 rolling back last transaction
64 ====== Pull full.hg into empty again (using -R)
64 ====== Pull full.hg into empty again (using -R)
65 pulling from full.hg
65 pulling from full.hg
66 requesting all changes
66 requesting all changes
67 adding changesets
67 adding changesets
68 adding manifests
68 adding manifests
69 adding file changes
69 adding file changes
70 added 9 changesets with 7 changes to 4 files (+1 heads)
70 added 9 changesets with 7 changes to 4 files (+1 heads)
71 (run 'hg heads' to see heads, 'hg merge' to merge)
71 (run 'hg heads' to see heads, 'hg merge' to merge)
72 ====== Log -R full.hg in fresh empty
72 ====== Log -R full.hg in fresh empty
73 changeset: 8:836ac62537ab
73 changeset: 8:836ac62537ab
74 tag: tip
74 tag: tip
75 parent: 3:ac69c658229d
75 parent: 3:ac69c658229d
76 user: test
76 user: test
77 date: Mon Jan 12 13:46:40 1970 +0000
77 date: Mon Jan 12 13:46:40 1970 +0000
78 summary: 0.3m
78 summary: 0.3m
79
79
80 changeset: 7:80fe151401c2
80 changeset: 7:80fe151401c2
81 user: test
81 user: test
82 date: Mon Jan 12 13:46:40 1970 +0000
82 date: Mon Jan 12 13:46:40 1970 +0000
83 summary: 1.3m
83 summary: 1.3m
84
84
85 changeset: 6:1e3f6b843bd6
85 changeset: 6:1e3f6b843bd6
86 user: test
86 user: test
87 date: Mon Jan 12 13:46:40 1970 +0000
87 date: Mon Jan 12 13:46:40 1970 +0000
88 summary: 1.3
88 summary: 1.3
89
89
90 changeset: 5:024e4e7df376
90 changeset: 5:024e4e7df376
91 user: test
91 user: test
92 date: Mon Jan 12 13:46:40 1970 +0000
92 date: Mon Jan 12 13:46:40 1970 +0000
93 summary: 1.2
93 summary: 1.2
94
94
95 changeset: 4:5f4f3ceb285e
95 changeset: 4:5f4f3ceb285e
96 parent: 0:5649c9d34dd8
96 parent: 0:5649c9d34dd8
97 user: test
97 user: test
98 date: Mon Jan 12 13:46:40 1970 +0000
98 date: Mon Jan 12 13:46:40 1970 +0000
99 summary: 1.1
99 summary: 1.1
100
100
101 changeset: 3:ac69c658229d
101 changeset: 3:ac69c658229d
102 user: test
102 user: test
103 date: Mon Jan 12 13:46:40 1970 +0000
103 date: Mon Jan 12 13:46:40 1970 +0000
104 summary: 0.3
104 summary: 0.3
105
105
106 changeset: 2:d62976ca1e50
106 changeset: 2:d62976ca1e50
107 user: test
107 user: test
108 date: Mon Jan 12 13:46:40 1970 +0000
108 date: Mon Jan 12 13:46:40 1970 +0000
109 summary: 0.2
109 summary: 0.2
110
110
111 changeset: 1:10b2180f755b
111 changeset: 1:10b2180f755b
112 user: test
112 user: test
113 date: Mon Jan 12 13:46:40 1970 +0000
113 date: Mon Jan 12 13:46:40 1970 +0000
114 summary: 0.1
114 summary: 0.1
115
115
116 changeset: 0:5649c9d34dd8
116 changeset: 0:5649c9d34dd8
117 user: test
117 user: test
118 date: Mon Jan 12 13:46:40 1970 +0000
118 date: Mon Jan 12 13:46:40 1970 +0000
119 summary: 0.0
119 summary: 0.0
120
120
121 ====== Pull ../full.hg into empty (with hook)
121 ====== Pull ../full.hg into empty (with hook)
122 changegroup hook: HG_NODE=5649c9d34dd87d0ecb5fd39672128376e83b22e1 HG_SOURCE=pull HG_URL=bundle:../full.hg
122 changegroup hook: HG_NODE=5649c9d34dd87d0ecb5fd39672128376e83b22e1 HG_SOURCE=pull HG_URL=bundle:../full.hg
123 pulling from bundle://../full.hg
123 pulling from bundle://../full.hg
124 requesting all changes
124 requesting all changes
125 adding changesets
125 adding changesets
126 adding manifests
126 adding manifests
127 adding file changes
127 adding file changes
128 added 9 changesets with 7 changes to 4 files (+1 heads)
128 added 9 changesets with 7 changes to 4 files (+1 heads)
129 (run 'hg heads' to see heads, 'hg merge' to merge)
129 (run 'hg heads' to see heads, 'hg merge' to merge)
130 ====== Rollback empty
130 ====== Rollback empty
131 rolling back last transaction
131 rolling back last transaction
132 ====== Log -R bundle:empty+full.hg
132 ====== Log -R bundle:empty+full.hg
133 8 7 6 5 4 3 2 1 0
133 8 7 6 5 4 3 2 1 0
134 ====== Pull full.hg into empty again (using -R; with hook)
134 ====== Pull full.hg into empty again (using -R; with hook)
135 changegroup hook: HG_NODE=5649c9d34dd87d0ecb5fd39672128376e83b22e1 HG_SOURCE=pull HG_URL=bundle:empty+full.hg
135 changegroup hook: HG_NODE=5649c9d34dd87d0ecb5fd39672128376e83b22e1 HG_SOURCE=pull HG_URL=bundle:empty+full.hg
136 pulling from full.hg
136 pulling from full.hg
137 requesting all changes
137 requesting all changes
138 adding changesets
138 adding changesets
139 adding manifests
139 adding manifests
140 adding file changes
140 adding file changes
141 added 9 changesets with 7 changes to 4 files (+1 heads)
141 added 9 changesets with 7 changes to 4 files (+1 heads)
142 (run 'hg heads' to see heads, 'hg merge' to merge)
142 (run 'hg heads' to see heads, 'hg merge' to merge)
143 ====== Create partial clones
143 ====== Create partial clones
144 requesting all changes
144 requesting all changes
145 adding changesets
145 adding changesets
146 adding manifests
146 adding manifests
147 adding file changes
147 adding file changes
148 added 4 changesets with 4 changes to 1 files
148 added 4 changesets with 4 changes to 1 files
149 updating to branch default
149 updating to branch default
150 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
150 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
151 updating to branch default
151 updating to branch default
152 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
152 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
153 ====== Log -R full.hg in partial
153 ====== Log -R full.hg in partial
154 changeset: 8:836ac62537ab
154 changeset: 8:836ac62537ab
155 tag: tip
155 tag: tip
156 parent: 3:ac69c658229d
156 parent: 3:ac69c658229d
157 user: test
157 user: test
158 date: Mon Jan 12 13:46:40 1970 +0000
158 date: Mon Jan 12 13:46:40 1970 +0000
159 summary: 0.3m
159 summary: 0.3m
160
160
161 changeset: 7:80fe151401c2
161 changeset: 7:80fe151401c2
162 user: test
162 user: test
163 date: Mon Jan 12 13:46:40 1970 +0000
163 date: Mon Jan 12 13:46:40 1970 +0000
164 summary: 1.3m
164 summary: 1.3m
165
165
166 changeset: 6:1e3f6b843bd6
166 changeset: 6:1e3f6b843bd6
167 user: test
167 user: test
168 date: Mon Jan 12 13:46:40 1970 +0000
168 date: Mon Jan 12 13:46:40 1970 +0000
169 summary: 1.3
169 summary: 1.3
170
170
171 changeset: 5:024e4e7df376
171 changeset: 5:024e4e7df376
172 user: test
172 user: test
173 date: Mon Jan 12 13:46:40 1970 +0000
173 date: Mon Jan 12 13:46:40 1970 +0000
174 summary: 1.2
174 summary: 1.2
175
175
176 changeset: 4:5f4f3ceb285e
176 changeset: 4:5f4f3ceb285e
177 parent: 0:5649c9d34dd8
177 parent: 0:5649c9d34dd8
178 user: test
178 user: test
179 date: Mon Jan 12 13:46:40 1970 +0000
179 date: Mon Jan 12 13:46:40 1970 +0000
180 summary: 1.1
180 summary: 1.1
181
181
182 changeset: 3:ac69c658229d
182 changeset: 3:ac69c658229d
183 user: test
183 user: test
184 date: Mon Jan 12 13:46:40 1970 +0000
184 date: Mon Jan 12 13:46:40 1970 +0000
185 summary: 0.3
185 summary: 0.3
186
186
187 changeset: 2:d62976ca1e50
187 changeset: 2:d62976ca1e50
188 user: test
188 user: test
189 date: Mon Jan 12 13:46:40 1970 +0000
189 date: Mon Jan 12 13:46:40 1970 +0000
190 summary: 0.2
190 summary: 0.2
191
191
192 changeset: 1:10b2180f755b
192 changeset: 1:10b2180f755b
193 user: test
193 user: test
194 date: Mon Jan 12 13:46:40 1970 +0000
194 date: Mon Jan 12 13:46:40 1970 +0000
195 summary: 0.1
195 summary: 0.1
196
196
197 changeset: 0:5649c9d34dd8
197 changeset: 0:5649c9d34dd8
198 user: test
198 user: test
199 date: Mon Jan 12 13:46:40 1970 +0000
199 date: Mon Jan 12 13:46:40 1970 +0000
200 summary: 0.0
200 summary: 0.0
201
201
202 ====== Incoming full.hg in partial
202 ====== Incoming full.hg in partial
203 comparing with bundle://../full.hg
203 comparing with bundle://../full.hg
204 searching for changes
204 searching for changes
205 changeset: 4:5f4f3ceb285e
205 changeset: 4:5f4f3ceb285e
206 parent: 0:5649c9d34dd8
206 parent: 0:5649c9d34dd8
207 user: test
207 user: test
208 date: Mon Jan 12 13:46:40 1970 +0000
208 date: Mon Jan 12 13:46:40 1970 +0000
209 summary: 1.1
209 summary: 1.1
210
210
211 changeset: 5:024e4e7df376
211 changeset: 5:024e4e7df376
212 user: test
212 user: test
213 date: Mon Jan 12 13:46:40 1970 +0000
213 date: Mon Jan 12 13:46:40 1970 +0000
214 summary: 1.2
214 summary: 1.2
215
215
216 changeset: 6:1e3f6b843bd6
216 changeset: 6:1e3f6b843bd6
217 user: test
217 user: test
218 date: Mon Jan 12 13:46:40 1970 +0000
218 date: Mon Jan 12 13:46:40 1970 +0000
219 summary: 1.3
219 summary: 1.3
220
220
221 changeset: 7:80fe151401c2
221 changeset: 7:80fe151401c2
222 user: test
222 user: test
223 date: Mon Jan 12 13:46:40 1970 +0000
223 date: Mon Jan 12 13:46:40 1970 +0000
224 summary: 1.3m
224 summary: 1.3m
225
225
226 changeset: 8:836ac62537ab
226 changeset: 8:836ac62537ab
227 tag: tip
227 tag: tip
228 parent: 3:ac69c658229d
228 parent: 3:ac69c658229d
229 user: test
229 user: test
230 date: Mon Jan 12 13:46:40 1970 +0000
230 date: Mon Jan 12 13:46:40 1970 +0000
231 summary: 0.3m
231 summary: 0.3m
232
232
233 ====== Outgoing -R full.hg vs partial2 in partial
233 ====== Outgoing -R full.hg vs partial2 in partial
234 comparing with ../partial2
234 comparing with ../partial2
235 searching for changes
235 searching for changes
236 changeset: 4:5f4f3ceb285e
236 changeset: 4:5f4f3ceb285e
237 parent: 0:5649c9d34dd8
237 parent: 0:5649c9d34dd8
238 user: test
238 user: test
239 date: Mon Jan 12 13:46:40 1970 +0000
239 date: Mon Jan 12 13:46:40 1970 +0000
240 summary: 1.1
240 summary: 1.1
241
241
242 changeset: 5:024e4e7df376
242 changeset: 5:024e4e7df376
243 user: test
243 user: test
244 date: Mon Jan 12 13:46:40 1970 +0000
244 date: Mon Jan 12 13:46:40 1970 +0000
245 summary: 1.2
245 summary: 1.2
246
246
247 changeset: 6:1e3f6b843bd6
247 changeset: 6:1e3f6b843bd6
248 user: test
248 user: test
249 date: Mon Jan 12 13:46:40 1970 +0000
249 date: Mon Jan 12 13:46:40 1970 +0000
250 summary: 1.3
250 summary: 1.3
251
251
252 changeset: 7:80fe151401c2
252 changeset: 7:80fe151401c2
253 user: test
253 user: test
254 date: Mon Jan 12 13:46:40 1970 +0000
254 date: Mon Jan 12 13:46:40 1970 +0000
255 summary: 1.3m
255 summary: 1.3m
256
256
257 changeset: 8:836ac62537ab
257 changeset: 8:836ac62537ab
258 tag: tip
258 tag: tip
259 parent: 3:ac69c658229d
259 parent: 3:ac69c658229d
260 user: test
260 user: test
261 date: Mon Jan 12 13:46:40 1970 +0000
261 date: Mon Jan 12 13:46:40 1970 +0000
262 summary: 0.3m
262 summary: 0.3m
263
263
264 ====== Outgoing -R does-not-exist.hg vs partial2 in partial
264 ====== Outgoing -R does-not-exist.hg vs partial2 in partial
265 abort: No such file or directory: ../does-not-exist.hg
265 abort: No such file or directory: ../does-not-exist.hg
266 ====== Direct clone from bundle (all-history)
266 ====== Direct clone from bundle (all-history)
267 requesting all changes
267 requesting all changes
268 adding changesets
268 adding changesets
269 adding manifests
269 adding manifests
270 adding file changes
270 adding file changes
271 added 9 changesets with 7 changes to 4 files (+1 heads)
271 added 9 changesets with 7 changes to 4 files (+1 heads)
272 updating to branch default
272 updating to branch default
273 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
273 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
274 changeset: 8:836ac62537ab
274 changeset: 8:836ac62537ab
275 tag: tip
275 tag: tip
276 parent: 3:ac69c658229d
276 parent: 3:ac69c658229d
277 user: test
277 user: test
278 date: Mon Jan 12 13:46:40 1970 +0000
278 date: Mon Jan 12 13:46:40 1970 +0000
279 summary: 0.3m
279 summary: 0.3m
280
280
281 changeset: 7:80fe151401c2
281 changeset: 7:80fe151401c2
282 user: test
282 user: test
283 date: Mon Jan 12 13:46:40 1970 +0000
283 date: Mon Jan 12 13:46:40 1970 +0000
284 summary: 1.3m
284 summary: 1.3m
285
285
286 ====== Unbundle incremental bundles into fresh empty in one go
286 ====== Unbundle incremental bundles into fresh empty in one go
287 1 changesets found
287 1 changesets found
288 1 changesets found
288 1 changesets found
289 adding changesets
289 adding changesets
290 adding manifests
290 adding manifests
291 adding file changes
291 adding file changes
292 added 1 changesets with 1 changes to 1 files
292 added 1 changesets with 1 changes to 1 files
293 adding changesets
293 adding changesets
294 adding manifests
294 adding manifests
295 adding file changes
295 adding file changes
296 added 1 changesets with 1 changes to 1 files
296 added 1 changesets with 1 changes to 1 files
297 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
297 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
298 ====== test for 540d1059c802
298 ====== test for 540d1059c802
299 updating to branch default
299 updating to branch default
300 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
300 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
301 searching for changes
301 searching for changes
302 1 changesets found
302 1 changesets found
303 comparing with ../bundle.hg
303 comparing with ../bundle.hg
304 searching for changes
304 searching for changes
305 changeset: 2:ed1b79f46b9a
305 changeset: 2:ed1b79f46b9a
306 tag: tip
306 tag: tip
307 parent: 0:bbd179dfa0a7
307 parent: 0:bbd179dfa0a7
308 user: test
308 user: test
309 date: Thu Jan 01 00:00:00 1970 +0000
309 date: Thu Jan 01 00:00:00 1970 +0000
310 summary: change foo
310 summary: change foo
311
311
312 ===== test that verify bundle does not traceback
312 ===== test that verify bundle does not traceback
313 abort: 00changelog.i@bbd179dfa0a7: unknown parent!
313 abort: 00changelog.i@bbd179dfa0a7: unknown parent!
314 abort: cannot verify bundle or remote repos
314 abort: cannot verify bundle or remote repos
315 checking changesets
315 checking changesets
316 checking manifests
316 checking manifests
317 crosschecking files in changesets and manifests
317 crosschecking files in changesets and manifests
318 checking files
318 checking files
319 2 files, 2 changesets, 2 total revisions
319 2 files, 2 changesets, 2 total revisions
320 ====== diff against bundle
320 ====== diff against bundle
321 diff -r 836ac62537ab anotherfile
321 diff -r 836ac62537ab anotherfile
322 --- a/anotherfile Mon Jan 12 13:46:40 1970 +0000
322 --- a/anotherfile Mon Jan 12 13:46:40 1970 +0000
323 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000
323 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000
324 @@ -1,4 +0,0 @@
324 @@ -1,4 +0,0 @@
325 -0
325 -0
326 -1
326 -1
327 -2
327 -2
328 -3
328 -3
329 ====== bundle single branch
330 adding a
331 adding b
332 adding b1
333 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
334 adding c
335 created new head
336 adding c1
337 == bundling via incoming
338 comparing with .
339 searching for changes
340 d2ae7f538514cd87c17547b0de4cea71fe1af9fb
341 5ece8e77363e2b5269e27c66828b72da29e4341a
342 == bundling
343 searching for changes
344 common changesets up to c0025332f9ed
345 2 changesets found
346 list of changesets:
347 d2ae7f538514cd87c17547b0de4cea71fe1af9fb
348 5ece8e77363e2b5269e27c66828b72da29e4341a
@@ -1,219 +1,219
1 notify extension - hooks for sending email notifications at commit/push time
1 notify extension - hooks for sending email notifications at commit/push time
2
2
3 Subscriptions can be managed through a hgrc file. Default mode is to print
3 Subscriptions can be managed through a hgrc file. Default mode is to print
4 messages to stdout, for testing and configuring.
4 messages to stdout, for testing and configuring.
5
5
6 To use, configure the notify extension and enable it in hgrc like this:
6 To use, configure the notify extension and enable it in hgrc like this:
7
7
8 [extensions]
8 [extensions]
9 hgext.notify =
9 hgext.notify =
10
10
11 [hooks]
11 [hooks]
12 # one email for each incoming changeset
12 # one email for each incoming changeset
13 incoming.notify = python:hgext.notify.hook
13 incoming.notify = python:hgext.notify.hook
14 # batch emails when many changesets incoming at one time
14 # batch emails when many changesets incoming at one time
15 changegroup.notify = python:hgext.notify.hook
15 changegroup.notify = python:hgext.notify.hook
16
16
17 [notify]
17 [notify]
18 # config items go here
18 # config items go here
19
19
20 Required configuration items:
20 Required configuration items:
21
21
22 config = /path/to/file # file containing subscriptions
22 config = /path/to/file # file containing subscriptions
23
23
24 Optional configuration items:
24 Optional configuration items:
25
25
26 test = True # print messages to stdout for testing
26 test = True # print messages to stdout for testing
27 strip = 3 # number of slashes to strip for url paths
27 strip = 3 # number of slashes to strip for url paths
28 domain = example.com # domain to use if committer missing domain
28 domain = example.com # domain to use if committer missing domain
29 style = ... # style file to use when formatting email
29 style = ... # style file to use when formatting email
30 template = ... # template to use when formatting email
30 template = ... # template to use when formatting email
31 incoming = ... # template to use when run as incoming hook
31 incoming = ... # template to use when run as incoming hook
32 changegroup = ... # template when run as changegroup hook
32 changegroup = ... # template when run as changegroup hook
33 maxdiff = 300 # max lines of diffs to include (0=none, -1=all)
33 maxdiff = 300 # max lines of diffs to include (0=none, -1=all)
34 maxsubject = 67 # truncate subject line longer than this
34 maxsubject = 67 # truncate subject line longer than this
35 diffstat = True # add a diffstat before the diff content
35 diffstat = True # add a diffstat before the diff content
36 sources = serve # notify if source of incoming changes in this list
36 sources = serve # notify if source of incoming changes in this list
37 # (serve == ssh or http, push, pull, bundle)
37 # (serve == ssh or http, push, pull, bundle)
38 merge = False # send notification for merges (default True)
38 merge = False # send notification for merges (default True)
39 [email]
39 [email]
40 from = user@host.com # email address to send as if none given
40 from = user@host.com # email address to send as if none given
41 [web]
41 [web]
42 baseurl = http://hgserver/... # root of hg web site for browsing commits
42 baseurl = http://hgserver/... # root of hg web site for browsing commits
43
43
44 The notify config file has same format as a regular hgrc file. It has two
44 The notify config file has same format as a regular hgrc file. It has two
45 sections so you can express subscriptions in whatever way is handier for you.
45 sections so you can express subscriptions in whatever way is handier for you.
46
46
47 [usersubs]
47 [usersubs]
48 # key is subscriber email, value is ","-separated list of glob patterns
48 # key is subscriber email, value is ","-separated list of glob patterns
49 user@host = pattern
49 user@host = pattern
50
50
51 [reposubs]
51 [reposubs]
52 # key is glob pattern, value is ","-separated list of subscriber emails
52 # key is glob pattern, value is ","-separated list of subscriber emails
53 pattern = user@host
53 pattern = user@host
54
54
55 Glob patterns are matched against path to repository root.
55 Glob patterns are matched against path to repository root.
56
56
57 If you like, you can put notify config file in repository that users can push
57 If you like, you can put notify config file in repository that users can push
58 changes to, they can manage their own subscriptions.
58 changes to, they can manage their own subscriptions.
59
59
60 no commands defined
60 no commands defined
61 % commit
61 % commit
62 adding a
62 adding a
63 % clone
63 % clone
64 updating to branch default
64 updating to branch default
65 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
65 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
66 % commit
66 % commit
67 % pull (minimal config)
67 % pull (minimal config)
68 pulling from ../a
68 pulling from ../a
69 searching for changes
69 searching for changes
70 adding changesets
70 adding changesets
71 adding manifests
71 adding manifests
72 adding file changes
72 adding file changes
73 added 1 changesets with 1 changes to 1 files
73 added 1 changesets with 1 changes to 1 files
74 Content-Type: text/plain; charset="us-ascii"
74 Content-Type: text/plain; charset="us-ascii"
75 MIME-Version: 1.0
75 MIME-Version: 1.0
76 Content-Transfer-Encoding: 7bit
76 Content-Transfer-Encoding: 7bit
77 Date:
77 Date:
78 Subject: changeset in test-notify/b: b
78 Subject: changeset in test-notify/b: b
79 From: test
79 From: test
80 X-Hg-Notification: changeset 0647d048b600
80 X-Hg-Notification: changeset 0647d048b600
81 Message-Id:
81 Message-Id:
82 To: baz, foo@bar
82 To: baz, foo@bar
83
83
84 changeset 0647d048b600 in test-notify/b
84 changeset 0647d048b600 in test-notify/b
85 details: test-notify/b?cmd=changeset;node=0647d048b600
85 details: test-notify/b?cmd=changeset;node=0647d048b600
86 description: b
86 description: b
87
87
88 diffs (6 lines):
88 diffs (6 lines):
89
89
90 diff -r cb9a9f314b8b -r 0647d048b600 a
90 diff -r cb9a9f314b8b -r 0647d048b600 a
91 --- a/a Thu Jan 01 00:00:00 1970 +0000
91 --- a/a Thu Jan 01 00:00:00 1970 +0000
92 +++ b/a Thu Jan 01 00:00:01 1970 +0000
92 +++ b/a Thu Jan 01 00:00:01 1970 +0000
93 @@ -1,1 +1,2 @@
93 @@ -1,1 +1,2 @@
94 a
94 a
95 +a
95 +a
96 (run 'hg update' to get a working copy)
96 (run 'hg update' to get a working copy)
97 % fail for config file is missing
97 % fail for config file is missing
98 rolling back last transaction
98 rolling back last transaction
99 pull failed
99 pull failed
100 % pull
100 % pull
101 rolling back last transaction
101 rolling back last transaction
102 pulling from ../a
102 pulling from ../a
103 searching for changes
103 searching for changes
104 adding changesets
104 adding changesets
105 adding manifests
105 adding manifests
106 adding file changes
106 adding file changes
107 added 1 changesets with 1 changes to 1 files
107 added 1 changesets with 1 changes to 1 files
108 Content-Type: text/plain; charset="us-ascii"
108 Content-Type: text/plain; charset="us-ascii"
109 MIME-Version: 1.0
109 MIME-Version: 1.0
110 Content-Transfer-Encoding: 7bit
110 Content-Transfer-Encoding: 7bit
111 X-Test: foo
111 X-Test: foo
112 Date:
112 Date:
113 Subject: b
113 Subject: b
114 From: test@test.com
114 From: test@test.com
115 X-Hg-Notification: changeset 0647d048b600
115 X-Hg-Notification: changeset 0647d048b600
116 Message-Id:
116 Message-Id:
117 To: baz@test.com, foo@bar
117 To: baz@test.com, foo@bar
118
118
119 changeset 0647d048b600
119 changeset 0647d048b600
120 description:
120 description:
121 b
121 b
122 diffs (6 lines):
122 diffs (6 lines):
123
123
124 diff -r cb9a9f314b8b -r 0647d048b600 a
124 diff -r cb9a9f314b8b -r 0647d048b600 a
125 --- a/a Thu Jan 01 00:00:00 1970 +0000
125 --- a/a Thu Jan 01 00:00:00 1970 +0000
126 +++ b/a Thu Jan 01 00:00:01 1970 +0000
126 +++ b/a Thu Jan 01 00:00:01 1970 +0000
127 @@ -1,1 +1,2 @@
127 @@ -1,1 +1,2 @@
128 a
128 a
129 +a
129 +a
130 (run 'hg update' to get a working copy)
130 (run 'hg update' to get a working copy)
131 % pull
131 % pull
132 rolling back last transaction
132 rolling back last transaction
133 pulling from ../a
133 pulling from ../a
134 searching for changes
134 searching for changes
135 adding changesets
135 adding changesets
136 adding manifests
136 adding manifests
137 adding file changes
137 adding file changes
138 added 1 changesets with 1 changes to 1 files
138 added 1 changesets with 1 changes to 1 files
139 Content-Type: text/plain; charset="us-ascii"
139 Content-Type: text/plain; charset="us-ascii"
140 MIME-Version: 1.0
140 MIME-Version: 1.0
141 Content-Transfer-Encoding: 7bit
141 Content-Transfer-Encoding: 7bit
142 X-Test: foo
142 X-Test: foo
143 Date:
143 Date:
144 Subject: b
144 Subject: b
145 From: test@test.com
145 From: test@test.com
146 X-Hg-Notification: changeset 0647d048b600
146 X-Hg-Notification: changeset 0647d048b600
147 Message-Id:
147 Message-Id:
148 To: baz@test.com, foo@bar
148 To: baz@test.com, foo@bar
149
149
150 changeset 0647d048b600
150 changeset 0647d048b600
151 description:
151 description:
152 b
152 b
153 diffstat:
153 diffstat:
154
154
155 a | 1 +
155 a | 1 +
156 1 files changed, 1 insertions(+), 0 deletions(-)
156 1 files changed, 1 insertions(+), 0 deletions(-)
157
157
158 diffs (6 lines):
158 diffs (6 lines):
159
159
160 diff -r cb9a9f314b8b -r 0647d048b600 a
160 diff -r cb9a9f314b8b -r 0647d048b600 a
161 --- a/a Thu Jan 01 00:00:00 1970 +0000
161 --- a/a Thu Jan 01 00:00:00 1970 +0000
162 +++ b/a Thu Jan 01 00:00:01 1970 +0000
162 +++ b/a Thu Jan 01 00:00:01 1970 +0000
163 @@ -1,1 +1,2 @@
163 @@ -1,1 +1,2 @@
164 a
164 a
165 +a
165 +a
166 (run 'hg update' to get a working copy)
166 (run 'hg update' to get a working copy)
167 % test merge
167 % test merge
168 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
168 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
169 created new head
169 created new head
170 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
170 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
171 (branch merge, don't forget to commit)
171 (branch merge, don't forget to commit)
172 pulling from ../a
172 pulling from ../a
173 searching for changes
173 searching for changes
174 adding changesets
174 adding changesets
175 adding manifests
175 adding manifests
176 adding file changes
176 adding file changes
177 added 2 changesets with 0 changes to 1 files
177 added 2 changesets with 0 changes to 0 files
178 Content-Type: text/plain; charset="us-ascii"
178 Content-Type: text/plain; charset="us-ascii"
179 MIME-Version: 1.0
179 MIME-Version: 1.0
180 Content-Transfer-Encoding: 7bit
180 Content-Transfer-Encoding: 7bit
181 X-Test: foo
181 X-Test: foo
182 Date:
182 Date:
183 Subject: adda2
183 Subject: adda2
184 From: test@test.com
184 From: test@test.com
185 X-Hg-Notification: changeset 0a184ce6067f
185 X-Hg-Notification: changeset 0a184ce6067f
186 Message-Id:
186 Message-Id:
187 To: baz@test.com, foo@bar
187 To: baz@test.com, foo@bar
188
188
189 changeset 0a184ce6067f
189 changeset 0a184ce6067f
190 description:
190 description:
191 adda2
191 adda2
192 diffstat:
192 diffstat:
193
193
194 a | 1 +
194 a | 1 +
195 1 files changed, 1 insertions(+), 0 deletions(-)
195 1 files changed, 1 insertions(+), 0 deletions(-)
196
196
197 diffs (6 lines):
197 diffs (6 lines):
198
198
199 diff -r cb9a9f314b8b -r 0a184ce6067f a
199 diff -r cb9a9f314b8b -r 0a184ce6067f a
200 --- a/a Thu Jan 01 00:00:00 1970 +0000
200 --- a/a Thu Jan 01 00:00:00 1970 +0000
201 +++ b/a Thu Jan 01 00:00:02 1970 +0000
201 +++ b/a Thu Jan 01 00:00:02 1970 +0000
202 @@ -1,1 +1,2 @@
202 @@ -1,1 +1,2 @@
203 a
203 a
204 +a
204 +a
205 Content-Type: text/plain; charset="us-ascii"
205 Content-Type: text/plain; charset="us-ascii"
206 MIME-Version: 1.0
206 MIME-Version: 1.0
207 Content-Transfer-Encoding: 7bit
207 Content-Transfer-Encoding: 7bit
208 X-Test: foo
208 X-Test: foo
209 Date:
209 Date:
210 Subject: merge
210 Subject: merge
211 From: test@test.com
211 From: test@test.com
212 X-Hg-Notification: changeset 22c88b85aa27
212 X-Hg-Notification: changeset 22c88b85aa27
213 Message-Id:
213 Message-Id:
214 To: baz@test.com, foo@bar
214 To: baz@test.com, foo@bar
215
215
216 changeset 22c88b85aa27
216 changeset 22c88b85aa27
217 description:
217 description:
218 merge
218 merge
219 (run 'hg update' to get a working copy)
219 (run 'hg update' to get a working copy)
General Comments 0
You need to be logged in to leave comments. Login now