I have to agree with @Thugmek, you need to experiment with your neural network architecture. Here's some code to go with his suggestion:

Thugmek's suggestion: add layers, plus a few modifications to experiment with:
```js
// define a sequential model (assumes uniqLabels is your array of distinct class labels)
const model = tf.sequential();

// add layers to the model
model.add(tf.layers.dense({ units: 64, activation: 'relu', inputShape: [6] }));
model.add(tf.layers.dropout({ rate: 0.2 })); // dropout to reduce overfitting
model.add(tf.layers.dense({ units: 32, activation: 'relu' }));
model.add(tf.layers.dropout({ rate: 0.2 }));

// softmax output because this is a multiclass problem
model.add(tf.layers.dense({ units: uniqLabels.length, activation: 'softmax' }));

// try a better optimizer like adam
model.compile({
  optimizer: 'adam', // Adam is generally more robust than plain SGD for deep learning
  loss: 'sparseCategoricalCrossentropy',
  metrics: ['accuracy']
});
```
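One thing to watch with `sparseCategoricalCrossentropy`: your labels should be integer class indices (0 to `uniqLabels.length - 1`), not one-hot vectors. If your labels are one-hot encoded, use `categoricalCrossentropy` instead. Here's a minimal training sketch so you can watch for overfitting while you experiment; `features` and `labelIndices` are placeholder names for your own data:

```js
// placeholder data — swap in your own features and integer class labels
const xs = tf.tensor2d(features, [features.length, 6]); // shape [numSamples, 6]
const ys = tf.tensor1d(labelIndices, 'int32');          // integer class indices

await model.fit(xs, ys, {
  epochs: 50,
  batchSize: 32,
  validationSplit: 0.2, // hold out 20% so you can spot overfitting
  callbacks: {
    onEpochEnd: (epoch, logs) =>
      console.log(`epoch ${epoch}: loss=${logs.loss}, acc=${logs.acc}`)
  }
});
```

If validation loss starts climbing while training loss keeps dropping, try bumping the dropout rate or shrinking the layers.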