ChainerCV training data processing












0












$begingroup$


I have an image set of 250 images of shape (3, 320, 240), 250 annotation files, and an ImageSet/Main folder containing train, val, and test text files, which list the images used for training, validation, and testing respectively.
I am using ChainerCV to detect and recognize two classes in the image: ball and player. Here we are using SSD300 model pre-trained on ImageNet dataset.



CLASS TO CREATE DATASET OBJECT



bball_labels = ('ball','player')
class BBall_dataset(VOCBboxDataset):
def _get_annotations(self, i):
id_ = self.ids[i]
anno = ET.parse(os.path.join(self.data_dir, 'Annotations', id_ +
'.xml'))
bbox = []
label = []
difficult = []
for obj in anno.findall('object'):
bndbox_anno = obj.find('bndbox')
bbox.append([int(bndbox_anno.find(tag).text) - 1 for tag in ('ymin',
'xmin', 'ymax', 'xmax')])
name = obj.find('name').text.lower().strip()
label.append(bball_labels.index(name))
bbox = np.stack(bbox).astype(np.float32)
label = np.stack(label).astype(np.int32)
difficult = np.array(difficult, dtype=np.bool)
return bbox, label, difficult

valid_dataset = BBall_dataset('ExpDataset', 'val')
test_dataset = BBall_dataset('ExpDataset', 'test')
train_dataset = BBall_dataset('ExpDataset', 'train')


Here train_dataset is an array containing img data((3,240,320),float32), bbox data((4,4),float32) and label data((4,),int32).



DOWNLOAD PRE-TRAINED MODEL



import chainer
from chainercv.links import SSD300
from chainercv.links.model.ssd import multibox_loss

class MultiboxTrainChain(chainer.Chain):
def __init__(self, model, alpha=1, k=3):
super(MultiboxTrainChain, self).__init__()
with self.init_scope():
self.model = model
self.alpha = alpha
self.k = k
def forward(self, imgs, gt_mb_locs, gt_mb_labels):
mb_locs, mb_confs = self.model(imgs)
loc_loss, conf_loss = multibox_loss(
mb_locs, mb_confs, gt_mb_locs, gt_mb_labels, self.k)
loss = loc_loss * self.alpha + conf_loss

chainer.reporter.report(
{'loss': loss, 'loss/loc': loc_loss, 'loss/conf': conf_loss},
self)
return loss

model = SSD300(n_fg_class=len(bball_labels), pretrained_model='imagenet')
train_chain = MultiboxTrainChain(model)


TRANSFORM DATASET:



class Transform(object):
def __init__(self, coder, size, mean):
self.coder = copy.copy(coder)
self.coder.to_cpu()

self.size = size
self.mean = mean
def __call__(self, in_data):
img, bbox, label = in_data
img = random_distort(img)
if np.random.randint(2):
img, param = transforms.random_expand(img, fill=self.mean,
return_param=True)
bbox = transforms.translate_bbox(bbox, y_offset=param['y_offset'],
x_offset=param['x_offset'])
img, param = random_crop_with_bbox_constraints(img, bbox,
return_param=True)
bbox, param = transforms.crop_bbox(bbox, y_slice=param['y_slice'],
x_slice=param['x_slice'],allow_outside_center=False, return_param=True)
label = label[param['index']]

_, H, W = img.shape
img = resize_with_random_interpolation(img, (self.size, self.size))
bbox = transforms.resize_bbox(bbox, (H, W), (self.size, self.size))

img, params = transforms.random_flip(img, x_random=True,
return_param=True)
bbox = transforms.flip_bbox(bbox, (self.size, self.size),
x_flip=params['x_flip'])

img -= self.mean
mb_loc, mb_label = self.coder.encode(bbox, label)

return img, mb_loc, mb_label
transformed_train_dataset = TransformDataset(train_dataset,
Transform(model.coder, model.insize, model.mean))

train_iter = chainer.iterators.MultiprocessIterator(transformed_train_dataset, batchsize)
valid_iter = chainer.iterators.SerialIterator(valid_dataset,
batchsize,
repeat=False, shuffle=False)


During training it throws the following error:



Exception in thread Thread-4:
Traceback (most recent call last):
File "/usr/lib/python3.6/threading.py", line 916, in _bootstrap_inner
self.run()
File "/usr/lib/python3.6/threading.py", line 864, in run
self._target(*self._args, **self._kwargs)
File "/usr/local/lib/python3.6/dist-
packages/chainer/iterators/multiprocess_iterator.py", line 401, in
fetch_batch
batch_ret[0] = [self.dataset[idx] for idx in indices]
File "/usr/local/lib/python3.6/dist-
........................................................................
packages/chainer/iterators/multiprocess_iterator.py", line 401, in
<listcomp>
batch_ret[0] = [self.dataset[idx] for idx in indices]
File "/usr/local/lib/python3.6/dist-
packages/chainer/dataset/dataset_mixin.py", line 67, in __getitem__
return self.get_example(index)
File "/usr/local/lib/python3.6/dist-
packages/chainer/datasets/transform_dataset.py", line 51, in get_example
in_data = self._dataset[i]
File "/usr/local/lib/python3.6/dist-
packages/chainer/dataset/dataset_mixin.py", line 67, in __getitem__
return self.get_example(index)
File "/usr/local/lib/python3.6/dist--
packages/chainercv/utils/image/read_image.py", line 120, in read_image
return _read_image_cv2(path, dtype, color, alpha)
File "/usr/local/lib/python3.6/dist-
packages/chainercv/utils/image/read_image.py", line 49, in _read_image_cv2
if img.ndim == 2:
AttributeError: 'NoneType' object has no attribute 'ndim'
TypeError: 'NoneType' object is not iterable


Is train_dataset format incorrect in this case? The errors say NoneType. I want to know the correct format for feeding the dataset into the model.









share









$endgroup$

















    0












    $begingroup$


    I have an image set of 250 images of shape (3, 320, 240), 250 annotation files, and an ImageSet/Main folder containing train, val, and test text files, which list the images used for training, validation, and testing respectively.
    I am using ChainerCV to detect and recognize two classes in the image: ball and player. Here we are using SSD300 model pre-trained on ImageNet dataset.



    CLASS TO CREATE DATASET OBJECT



    bball_labels = ('ball','player')
    class BBall_dataset(VOCBboxDataset):
    def _get_annotations(self, i):
    id_ = self.ids[i]
    anno = ET.parse(os.path.join(self.data_dir, 'Annotations', id_ +
    '.xml'))
    bbox = []
    label = []
    difficult = []
    for obj in anno.findall('object'):
    bndbox_anno = obj.find('bndbox')
    bbox.append([int(bndbox_anno.find(tag).text) - 1 for tag in ('ymin',
    'xmin', 'ymax', 'xmax')])
    name = obj.find('name').text.lower().strip()
    label.append(bball_labels.index(name))
    bbox = np.stack(bbox).astype(np.float32)
    label = np.stack(label).astype(np.int32)
    difficult = np.array(difficult, dtype=np.bool)
    return bbox, label, difficult

    valid_dataset = BBall_dataset('ExpDataset', 'val')
    test_dataset = BBall_dataset('ExpDataset', 'test')
    train_dataset = BBall_dataset('ExpDataset', 'train')


    Here train_dataset is an array containing img data((3,240,320),float32), bbox data((4,4),float32) and label data((4,),int32).



    DOWNLOAD PRE-TRAINED MODEL



    import chainer
    from chainercv.links import SSD300
    from chainercv.links.model.ssd import multibox_loss

    class MultiboxTrainChain(chainer.Chain):
    def __init__(self, model, alpha=1, k=3):
    super(MultiboxTrainChain, self).__init__()
    with self.init_scope():
    self.model = model
    self.alpha = alpha
    self.k = k
    def forward(self, imgs, gt_mb_locs, gt_mb_labels):
    mb_locs, mb_confs = self.model(imgs)
    loc_loss, conf_loss = multibox_loss(
    mb_locs, mb_confs, gt_mb_locs, gt_mb_labels, self.k)
    loss = loc_loss * self.alpha + conf_loss

    chainer.reporter.report(
    {'loss': loss, 'loss/loc': loc_loss, 'loss/conf': conf_loss},
    self)
    return loss

    model = SSD300(n_fg_class=len(bball_labels), pretrained_model='imagenet')
    train_chain = MultiboxTrainChain(model)


    TRANSFORM DATASET:



    class Transform(object):
    def __init__(self, coder, size, mean):
    self.coder = copy.copy(coder)
    self.coder.to_cpu()

    self.size = size
    self.mean = mean
    def __call__(self, in_data):
    img, bbox, label = in_data
    img = random_distort(img)
    if np.random.randint(2):
    img, param = transforms.random_expand(img, fill=self.mean,
    return_param=True)
    bbox = transforms.translate_bbox(bbox, y_offset=param['y_offset'],
    x_offset=param['x_offset'])
    img, param = random_crop_with_bbox_constraints(img, bbox,
    return_param=True)
    bbox, param = transforms.crop_bbox(bbox, y_slice=param['y_slice'],
    x_slice=param['x_slice'],allow_outside_center=False, return_param=True)
    label = label[param['index']]

    _, H, W = img.shape
    img = resize_with_random_interpolation(img, (self.size, self.size))
    bbox = transforms.resize_bbox(bbox, (H, W), (self.size, self.size))

    img, params = transforms.random_flip(img, x_random=True,
    return_param=True)
    bbox = transforms.flip_bbox(bbox, (self.size, self.size),
    x_flip=params['x_flip'])

    img -= self.mean
    mb_loc, mb_label = self.coder.encode(bbox, label)

    return img, mb_loc, mb_label
    transformed_train_dataset = TransformDataset(train_dataset,
    Transform(model.coder, model.insize, model.mean))

    train_iter = chainer.iterators.MultiprocessIterator(transformed_train_dataset, batchsize)
    valid_iter = chainer.iterators.SerialIterator(valid_dataset,
    batchsize,
    repeat=False, shuffle=False)


    During training it throws the following error:



    Exception in thread Thread-4:
    Traceback (most recent call last):
    File "/usr/lib/python3.6/threading.py", line 916, in _bootstrap_inner
    self.run()
    File "/usr/lib/python3.6/threading.py", line 864, in run
    self._target(*self._args, **self._kwargs)
    File "/usr/local/lib/python3.6/dist-
    packages/chainer/iterators/multiprocess_iterator.py", line 401, in
    fetch_batch
    batch_ret[0] = [self.dataset[idx] for idx in indices]
    File "/usr/local/lib/python3.6/dist-
    ........................................................................
    packages/chainer/iterators/multiprocess_iterator.py", line 401, in
    <listcomp>
    batch_ret[0] = [self.dataset[idx] for idx in indices]
    File "/usr/local/lib/python3.6/dist-
    packages/chainer/dataset/dataset_mixin.py", line 67, in __getitem__
    return self.get_example(index)
    File "/usr/local/lib/python3.6/dist-
    packages/chainer/datasets/transform_dataset.py", line 51, in get_example
    in_data = self._dataset[i]
    File "/usr/local/lib/python3.6/dist-
    packages/chainer/dataset/dataset_mixin.py", line 67, in __getitem__
    return self.get_example(index)
    File "/usr/local/lib/python3.6/dist--
    packages/chainercv/utils/image/read_image.py", line 120, in read_image
    return _read_image_cv2(path, dtype, color, alpha)
    File "/usr/local/lib/python3.6/dist-
    packages/chainercv/utils/image/read_image.py", line 49, in _read_image_cv2
    if img.ndim == 2:
    AttributeError: 'NoneType' object has no attribute 'ndim'
    TypeError: 'NoneType' object is not iterable


    Is train_dataset format incorrect in this case? The errors say NoneType. I want to know the correct format for feeding the dataset into the model.









    share









    $endgroup$















      0












      0








      0





      $begingroup$


      I have an image set of 250 images of shape (3, 320, 240), 250 annotation files, and an ImageSet/Main folder containing train, val, and test text files, which list the images used for training, validation, and testing respectively.
      I am using ChainerCV to detect and recognize two classes in the image: ball and player. Here we are using SSD300 model pre-trained on ImageNet dataset.



      CLASS TO CREATE DATASET OBJECT



      bball_labels = ('ball','player')
      class BBall_dataset(VOCBboxDataset):
      def _get_annotations(self, i):
      id_ = self.ids[i]
      anno = ET.parse(os.path.join(self.data_dir, 'Annotations', id_ +
      '.xml'))
      bbox = []
      label = []
      difficult = []
      for obj in anno.findall('object'):
      bndbox_anno = obj.find('bndbox')
      bbox.append([int(bndbox_anno.find(tag).text) - 1 for tag in ('ymin',
      'xmin', 'ymax', 'xmax')])
      name = obj.find('name').text.lower().strip()
      label.append(bball_labels.index(name))
      bbox = np.stack(bbox).astype(np.float32)
      label = np.stack(label).astype(np.int32)
      difficult = np.array(difficult, dtype=np.bool)
      return bbox, label, difficult

      valid_dataset = BBall_dataset('ExpDataset', 'val')
      test_dataset = BBall_dataset('ExpDataset', 'test')
      train_dataset = BBall_dataset('ExpDataset', 'train')


      Here train_dataset is an array containing img data((3,240,320),float32), bbox data((4,4),float32) and label data((4,),int32).



      DOWNLOAD PRE-TRAINED MODEL



      import chainer
      from chainercv.links import SSD300
      from chainercv.links.model.ssd import multibox_loss

      class MultiboxTrainChain(chainer.Chain):
      def __init__(self, model, alpha=1, k=3):
      super(MultiboxTrainChain, self).__init__()
      with self.init_scope():
      self.model = model
      self.alpha = alpha
      self.k = k
      def forward(self, imgs, gt_mb_locs, gt_mb_labels):
      mb_locs, mb_confs = self.model(imgs)
      loc_loss, conf_loss = multibox_loss(
      mb_locs, mb_confs, gt_mb_locs, gt_mb_labels, self.k)
      loss = loc_loss * self.alpha + conf_loss

      chainer.reporter.report(
      {'loss': loss, 'loss/loc': loc_loss, 'loss/conf': conf_loss},
      self)
      return loss

      model = SSD300(n_fg_class=len(bball_labels), pretrained_model='imagenet')
      train_chain = MultiboxTrainChain(model)


      TRANSFORM DATASET:



      class Transform(object):
      def __init__(self, coder, size, mean):
      self.coder = copy.copy(coder)
      self.coder.to_cpu()

      self.size = size
      self.mean = mean
      def __call__(self, in_data):
      img, bbox, label = in_data
      img = random_distort(img)
      if np.random.randint(2):
      img, param = transforms.random_expand(img, fill=self.mean,
      return_param=True)
      bbox = transforms.translate_bbox(bbox, y_offset=param['y_offset'],
      x_offset=param['x_offset'])
      img, param = random_crop_with_bbox_constraints(img, bbox,
      return_param=True)
      bbox, param = transforms.crop_bbox(bbox, y_slice=param['y_slice'],
      x_slice=param['x_slice'],allow_outside_center=False, return_param=True)
      label = label[param['index']]

      _, H, W = img.shape
      img = resize_with_random_interpolation(img, (self.size, self.size))
      bbox = transforms.resize_bbox(bbox, (H, W), (self.size, self.size))

      img, params = transforms.random_flip(img, x_random=True,
      return_param=True)
      bbox = transforms.flip_bbox(bbox, (self.size, self.size),
      x_flip=params['x_flip'])

      img -= self.mean
      mb_loc, mb_label = self.coder.encode(bbox, label)

      return img, mb_loc, mb_label
      transformed_train_dataset = TransformDataset(train_dataset,
      Transform(model.coder, model.insize, model.mean))

      train_iter = chainer.iterators.MultiprocessIterator(transformed_train_dataset, batchsize)
      valid_iter = chainer.iterators.SerialIterator(valid_dataset,
      batchsize,
      repeat=False, shuffle=False)


      During training it throws the following error:



      Exception in thread Thread-4:
      Traceback (most recent call last):
      File "/usr/lib/python3.6/threading.py", line 916, in _bootstrap_inner
      self.run()
      File "/usr/lib/python3.6/threading.py", line 864, in run
      self._target(*self._args, **self._kwargs)
      File "/usr/local/lib/python3.6/dist-
      packages/chainer/iterators/multiprocess_iterator.py", line 401, in
      fetch_batch
      batch_ret[0] = [self.dataset[idx] for idx in indices]
      File "/usr/local/lib/python3.6/dist-
      ........................................................................
      packages/chainer/iterators/multiprocess_iterator.py", line 401, in
      <listcomp>
      batch_ret[0] = [self.dataset[idx] for idx in indices]
      File "/usr/local/lib/python3.6/dist-
      packages/chainer/dataset/dataset_mixin.py", line 67, in __getitem__
      return self.get_example(index)
      File "/usr/local/lib/python3.6/dist-
      packages/chainer/datasets/transform_dataset.py", line 51, in get_example
      in_data = self._dataset[i]
      File "/usr/local/lib/python3.6/dist-
      packages/chainer/dataset/dataset_mixin.py", line 67, in __getitem__
      return self.get_example(index)
      File "/usr/local/lib/python3.6/dist--
      packages/chainercv/utils/image/read_image.py", line 120, in read_image
      return _read_image_cv2(path, dtype, color, alpha)
      File "/usr/local/lib/python3.6/dist-
      packages/chainercv/utils/image/read_image.py", line 49, in _read_image_cv2
      if img.ndim == 2:
      AttributeError: 'NoneType' object has no attribute 'ndim'
      TypeError: 'NoneType' object is not iterable


      Is train_dataset format incorrect in this case? The errors say NoneType. I want to know the correct format for feeding the dataset into the model.









      share









      $endgroup$




      I have an image set of 250 images of shape (3, 320, 240), 250 annotation files, and an ImageSet/Main folder containing train, val, and test text files, which list the images used for training, validation, and testing respectively.
      I am using ChainerCV to detect and recognize two classes in the image: ball and player. Here we are using SSD300 model pre-trained on ImageNet dataset.



      CLASS TO CREATE DATASET OBJECT



      bball_labels = ('ball','player')
      class BBall_dataset(VOCBboxDataset):
      def _get_annotations(self, i):
      id_ = self.ids[i]
      anno = ET.parse(os.path.join(self.data_dir, 'Annotations', id_ +
      '.xml'))
      bbox = []
      label = []
      difficult = []
      for obj in anno.findall('object'):
      bndbox_anno = obj.find('bndbox')
      bbox.append([int(bndbox_anno.find(tag).text) - 1 for tag in ('ymin',
      'xmin', 'ymax', 'xmax')])
      name = obj.find('name').text.lower().strip()
      label.append(bball_labels.index(name))
      bbox = np.stack(bbox).astype(np.float32)
      label = np.stack(label).astype(np.int32)
      difficult = np.array(difficult, dtype=np.bool)
      return bbox, label, difficult

      valid_dataset = BBall_dataset('ExpDataset', 'val')
      test_dataset = BBall_dataset('ExpDataset', 'test')
      train_dataset = BBall_dataset('ExpDataset', 'train')


      Here train_dataset is an array containing img data((3,240,320),float32), bbox data((4,4),float32) and label data((4,),int32).



      DOWNLOAD PRE-TRAINED MODEL



      import chainer
      from chainercv.links import SSD300
      from chainercv.links.model.ssd import multibox_loss

      class MultiboxTrainChain(chainer.Chain):
      def __init__(self, model, alpha=1, k=3):
      super(MultiboxTrainChain, self).__init__()
      with self.init_scope():
      self.model = model
      self.alpha = alpha
      self.k = k
      def forward(self, imgs, gt_mb_locs, gt_mb_labels):
      mb_locs, mb_confs = self.model(imgs)
      loc_loss, conf_loss = multibox_loss(
      mb_locs, mb_confs, gt_mb_locs, gt_mb_labels, self.k)
      loss = loc_loss * self.alpha + conf_loss

      chainer.reporter.report(
      {'loss': loss, 'loss/loc': loc_loss, 'loss/conf': conf_loss},
      self)
      return loss

      model = SSD300(n_fg_class=len(bball_labels), pretrained_model='imagenet')
      train_chain = MultiboxTrainChain(model)


      TRANSFORM DATASET:



      class Transform(object):
      def __init__(self, coder, size, mean):
      self.coder = copy.copy(coder)
      self.coder.to_cpu()

      self.size = size
      self.mean = mean
      def __call__(self, in_data):
      img, bbox, label = in_data
      img = random_distort(img)
      if np.random.randint(2):
      img, param = transforms.random_expand(img, fill=self.mean,
      return_param=True)
      bbox = transforms.translate_bbox(bbox, y_offset=param['y_offset'],
      x_offset=param['x_offset'])
      img, param = random_crop_with_bbox_constraints(img, bbox,
      return_param=True)
      bbox, param = transforms.crop_bbox(bbox, y_slice=param['y_slice'],
      x_slice=param['x_slice'],allow_outside_center=False, return_param=True)
      label = label[param['index']]

      _, H, W = img.shape
      img = resize_with_random_interpolation(img, (self.size, self.size))
      bbox = transforms.resize_bbox(bbox, (H, W), (self.size, self.size))

      img, params = transforms.random_flip(img, x_random=True,
      return_param=True)
      bbox = transforms.flip_bbox(bbox, (self.size, self.size),
      x_flip=params['x_flip'])

      img -= self.mean
      mb_loc, mb_label = self.coder.encode(bbox, label)

      return img, mb_loc, mb_label
      transformed_train_dataset = TransformDataset(train_dataset,
      Transform(model.coder, model.insize, model.mean))

      train_iter = chainer.iterators.MultiprocessIterator(transformed_train_dataset, batchsize)
      valid_iter = chainer.iterators.SerialIterator(valid_dataset,
      batchsize,
      repeat=False, shuffle=False)


      During training it throws the following error:



      Exception in thread Thread-4:
      Traceback (most recent call last):
      File "/usr/lib/python3.6/threading.py", line 916, in _bootstrap_inner
      self.run()
      File "/usr/lib/python3.6/threading.py", line 864, in run
      self._target(*self._args, **self._kwargs)
      File "/usr/local/lib/python3.6/dist-
      packages/chainer/iterators/multiprocess_iterator.py", line 401, in
      fetch_batch
      batch_ret[0] = [self.dataset[idx] for idx in indices]
      File "/usr/local/lib/python3.6/dist-
      ........................................................................
      packages/chainer/iterators/multiprocess_iterator.py", line 401, in
      <listcomp>
      batch_ret[0] = [self.dataset[idx] for idx in indices]
      File "/usr/local/lib/python3.6/dist-
      packages/chainer/dataset/dataset_mixin.py", line 67, in __getitem__
      return self.get_example(index)
      File "/usr/local/lib/python3.6/dist-
      packages/chainer/datasets/transform_dataset.py", line 51, in get_example
      in_data = self._dataset[i]
      File "/usr/local/lib/python3.6/dist-
      packages/chainer/dataset/dataset_mixin.py", line 67, in __getitem__
      return self.get_example(index)
      File "/usr/local/lib/python3.6/dist--
      packages/chainercv/utils/image/read_image.py", line 120, in read_image
      return _read_image_cv2(path, dtype, color, alpha)
      File "/usr/local/lib/python3.6/dist-
      packages/chainercv/utils/image/read_image.py", line 49, in _read_image_cv2
      if img.ndim == 2:
      AttributeError: 'NoneType' object has no attribute 'ndim'
      TypeError: 'NoneType' object is not iterable


      Is train_dataset format incorrect in this case? The errors say NoneType. I want to know the correct format for feeding the dataset into the model.







      python deep-learning object-detection





      share












      share










      share



      share










      asked 4 mins ago









      Neerajan SahaNeerajan Saha

      64




      64






















          0






          active

          oldest

          votes











          Your Answer





          StackExchange.ifUsing("editor", function () {
          return StackExchange.using("mathjaxEditing", function () {
          StackExchange.MarkdownEditor.creationCallbacks.add(function (editor, postfix) {
          StackExchange.mathjaxEditing.prepareWmdForMathJax(editor, postfix, [["$", "$"], ["\\(","\\)"]]);
          });
          });
          }, "mathjax-editing");

          StackExchange.ready(function() {
          var channelOptions = {
          tags: "".split(" "),
          id: "557"
          };
          initTagRenderer("".split(" "), "".split(" "), channelOptions);

          StackExchange.using("externalEditor", function() {
          // Have to fire editor after snippets, if snippets enabled
          if (StackExchange.settings.snippets.snippetsEnabled) {
          StackExchange.using("snippets", function() {
          createEditor();
          });
          }
          else {
          createEditor();
          }
          });

          function createEditor() {
          StackExchange.prepareEditor({
          heartbeatType: 'answer',
          autoActivateHeartbeat: false,
          convertImagesToLinks: false,
          noModals: true,
          showLowRepImageUploadWarning: true,
          reputationToPostImages: null,
          bindNavPrevention: true,
          postfix: "",
          imageUploader: {
          brandingHtml: "Powered by u003ca class="icon-imgur-white" href="https://imgur.com/"u003eu003c/au003e",
          contentPolicyHtml: "User contributions licensed under u003ca href="https://creativecommons.org/licenses/by-sa/3.0/"u003ecc by-sa 3.0 with attribution requiredu003c/au003e u003ca href="https://stackoverflow.com/legal/content-policy"u003e(content policy)u003c/au003e",
          allowUrls: true
          },
          onDemand: true,
          discardSelector: ".discard-answer"
          ,immediatelyShowMarkdownHelp:true
          });


          }
          });














          draft saved

          draft discarded


















          StackExchange.ready(
          function () {
          StackExchange.openid.initPostLogin('.new-post-login', 'https%3a%2f%2fdatascience.stackexchange.com%2fquestions%2f46684%2fchainercv-training-data-processing%23new-answer', 'question_page');
          }
          );

          Post as a guest















          Required, but never shown

























          0






          active

          oldest

          votes








          0






          active

          oldest

          votes









          active

          oldest

          votes






          active

          oldest

          votes
















          draft saved

          draft discarded




















































          Thanks for contributing an answer to Data Science Stack Exchange!


          • Please be sure to answer the question. Provide details and share your research!

          But avoid



          • Asking for help, clarification, or responding to other answers.

          • Making statements based on opinion; back them up with references or personal experience.


          Use MathJax to format equations. MathJax reference.


          To learn more, see our tips on writing great answers.




          draft saved


          draft discarded














          StackExchange.ready(
          function () {
          StackExchange.openid.initPostLogin('.new-post-login', 'https%3a%2f%2fdatascience.stackexchange.com%2fquestions%2f46684%2fchainercv-training-data-processing%23new-answer', 'question_page');
          }
          );

          Post as a guest















          Required, but never shown





















































          Required, but never shown














          Required, but never shown












          Required, but never shown







          Required, but never shown

































          Required, but never shown














          Required, but never shown












          Required, but never shown







          Required, but never shown







          Popular posts from this blog

          Erzsébet Schaár

          Ponta tanko

          Tantalo (mitologio)